Dataset schema (five columns per row; string columns report min/max length, integer columns report min/max value):

    column                   type     range
    code                     string   length 87 – 55.2k
    code_codestyle           int64    0 – 349
    style_context            string   length 135 – 49.1k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
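The samples below follow this schema. As a quick orientation, here is a minimal sketch of loading and inspecting one row with the `datasets` library; the repository id `user/code-style-pairs` is a hypothetical placeholder, since this dump does not name the dataset:

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "user/code-style-pairs" is a hypothetical placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # code text and its style id (0-349)
print(len(row["style_context"]), row["style_context_codestyle"])  # context text and its style id
print(row["label"])                                               # binary label (0 or 1)
```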
Sample 1 · code (code_codestyle = 138):

```python
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __A ( _a , unittest.TestCase ):
    lowerCAmelCase_ : Dict = RobertaTokenizer
    lowerCAmelCase_ : Any = RobertaTokenizerFast
    lowerCAmelCase_ : List[str] = True
    lowerCAmelCase_ : Optional[Any] = {"cls_token": "<s>"}

    def lowercase__ ( self : Any ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCAmelCase : List[str] = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er',
            '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        lowerCAmelCase : List[Any] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        lowerCAmelCase : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        lowerCAmelCase : List[Any] = {'unk_token': '<unk>'}
        lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowercase__ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowercase__ ) )

    def lowercase__ ( self : Dict , **UpperCAmelCase_ : List[str] ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )

    def lowercase__ ( self : Union[str, Any] , **UpperCAmelCase_ : Dict ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )

    def lowercase__ ( self : List[str] , UpperCAmelCase_ : Dict ):
        lowerCAmelCase : Optional[int] = 'lower newer'
        lowerCAmelCase : List[Any] = 'lower newer'
        return input_text, output_text

    def lowercase__ ( self : Optional[int] ):
        lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCAmelCase : Tuple = 'lower newer'
        lowerCAmelCase : Any = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        lowerCAmelCase : Any = tokenizer.tokenize(lowercase__ )  # , add_prefix_space=True)
        self.assertListEqual(lowercase__ , lowercase__ )
        lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
        lowerCAmelCase : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )

    def lowercase__ ( self : Any ):
        lowerCAmelCase : Tuple = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowercase__ ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowercase__ ) ,
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )

    @slow
    def lowercase__ ( self : Optional[int] ):
        lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained('roberta-base' )
        lowerCAmelCase : Any = tokenizer.encode('sequence builders' , add_special_tokens=lowercase__ )
        lowerCAmelCase : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase__ )
        lowerCAmelCase : Any = tokenizer.encode(
            'sequence builders' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        lowerCAmelCase : str = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ )
        lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def lowercase__ ( self : int ):
        lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
        lowerCAmelCase : Optional[int] = 'Encode this sequence.'
        lowerCAmelCase : List[Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]

        # Testing encoder arguments
        lowerCAmelCase : Dict = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )
        lowerCAmelCase : str = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase__ , lowercase__ )

        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        lowerCAmelCase : Union[str, Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
        lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )

        # Testing spaces after special tokens
        lowerCAmelCase : List[str] = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} )  # mask token has a left space
        lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(lowercase__ )
        lowerCAmelCase : Tuple = 'Encode <mask> sequence'
        lowerCAmelCase : str = 'Encode <mask>sequence'
        lowerCAmelCase : str = tokenizer.encode(lowercase__ )
        lowerCAmelCase : Optional[int] = encoded.index(lowercase__ )
        lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase__ , lowercase__ )
        lowerCAmelCase : List[Any] = tokenizer.encode(lowercase__ )
        lowerCAmelCase : Optional[Any] = encoded.index(lowercase__ )
        lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )

    def lowercase__ ( self : int ):
        pass

    def lowercase__ ( self : str ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                lowerCAmelCase : Union[str, Any] = 'A, <mask> AllenNLP sentence.'
                lowerCAmelCase : Dict = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
                lowerCAmelCase : List[Any] = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,
                    sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )

                lowerCAmelCase : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                lowerCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    lowercase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    lowercase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    def lowercase__ ( self : List[Any] ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
            lowerCAmelCase : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            lowerCAmelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowercase__ )
            self.assertEqual(post_processor_state['add_prefix_space'] , lowercase__ )
            self.assertEqual(post_processor_state['trim_offsets'] , lowercase__ )

    def lowercase__ ( self : Tuple ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCAmelCase : Dict = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                lowerCAmelCase : List[str] = f"{text_of_1_token} {text_of_1_token}"

                lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : Dict = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : Optional[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : int = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : List[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : Optional[int] = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : str = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : Tuple = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )

                lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                lowerCAmelCase : Optional[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
```
Sample 1 · style_context (style_context_codestyle = 333):

```python
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class A_ ( _a , unittest.TestCase ):
    '''simple docstring'''

    a__ = LongformerTokenizer
    a__ = True
    a__ = LongformerTokenizerFast
    a__ = True

    def lowerCAmelCase_ (self ) -> Any:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCAmelCase = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''',
        ]
        __UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __UpperCAmelCase = {'''unk_token''': '''<unk>'''}
        __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase__ ) )

    def lowerCAmelCase_ (self , **lowercase__ ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )

    def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )

    def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
        __UpperCAmelCase = '''lower newer'''
        __UpperCAmelCase = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase_ (self ) -> Optional[Any]:
        __UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCAmelCase = '''lower newer'''
        __UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        __UpperCAmelCase = tokenizer.tokenize(lowercase__ )  # , add_prefix_space=True)
        self.assertListEqual(lowercase__ , lowercase__ )
        __UpperCAmelCase = tokens + [tokenizer.unk_token]
        __UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )

    def lowerCAmelCase_ (self ) -> int:
        __UpperCAmelCase = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) ,
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    @slow
    def lowerCAmelCase_ (self ) -> int:
        __UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
        __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
        __UpperCAmelCase = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        __UpperCAmelCase = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
        __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def lowerCAmelCase_ (self ) -> Any:
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = '''Encode this sequence.'''
        __UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )
        __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
        __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase__ , lowercase__ )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
        __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )

        # Testing spaces after special tokens
        __UpperCAmelCase = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} )  # mask token has a left space
        __UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
        __UpperCAmelCase = '''Encode <mask> sequence'''
        __UpperCAmelCase = '''Encode <mask>sequence'''
        __UpperCAmelCase = tokenizer.encode(lowercase__ )
        __UpperCAmelCase = encoded.index(lowercase__ )
        __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase__ , lowercase__ )
        __UpperCAmelCase = tokenizer.encode(lowercase__ )
        __UpperCAmelCase = encoded.index(lowercase__ )
        __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase__ , lowercase__ )

    def lowerCAmelCase_ (self ) -> Tuple:
        pass

    def lowerCAmelCase_ (self ) -> int:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                __UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                __UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
                __UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
                __UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,
                    sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )

                __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
            __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
            self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )

    def lowerCAmelCase_ (self ) -> Union[str, Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __UpperCAmelCase = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                __UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = F''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )

                __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
                __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
```
Sample 1 · label = 0

Sample 2 · code (code_codestyle = 115):

```python
"""simple docstring"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase : str = logging.get_logger(__name__)

UpperCAmelCase : Any = {
    'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
    'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
    'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
    'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
    'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
    'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
    'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
    'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
    'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
    'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}


class lowerCamelCase__ ( _a ):
    """simple docstring"""

    __a = """rwkv"""
    __a = {"""max_position_embeddings""": """context_length"""}

    def __init__( self : Union[str, Any] , UpperCamelCase : Optional[Any]=50_277 , UpperCamelCase : List[str]=1_024 , UpperCamelCase : str=4_096 , UpperCamelCase : List[str]=32 , UpperCamelCase : Tuple=None , UpperCamelCase : List[str]=None , UpperCamelCase : Any=1e-5 , UpperCamelCase : int=0 , UpperCamelCase : str=0 , UpperCamelCase : Any=6 , UpperCamelCase : List[Any]=False , UpperCamelCase : Union[str, Any]=True , **UpperCamelCase : Any , ):
        '''simple docstring'''
        __UpperCAmelCase : Dict = vocab_size
        __UpperCAmelCase : Any = context_length
        __UpperCAmelCase : List[str] = hidden_size
        __UpperCAmelCase : int = num_hidden_layers
        __UpperCAmelCase : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size
        __UpperCAmelCase : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
        __UpperCAmelCase : List[Any] = layer_norm_epsilon
        __UpperCAmelCase : Any = rescale_every
        __UpperCAmelCase : Union[str, Any] = use_cache
        __UpperCAmelCase : Any = bos_token_id
        __UpperCAmelCase : Dict = eos_token_id
        super().__init__(
            tie_word_embeddings=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
```
Sample 2 · style_context (style_context_codestyle = 333):

```python
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class A_ ( _a ):
    '''simple docstring'''

    a__ = (IPNDMScheduler,)
    a__ = (("num_inference_steps", 50),)

    def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
        __UpperCAmelCase = {'''num_train_timesteps''': 1_000}
        config.update(**lowercase__ )
        return config

    def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
        __UpperCAmelCase = dict(self.forward_default_kwargs )
        __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
        __UpperCAmelCase = self.dummy_sample
        __UpperCAmelCase = 0.1 * sample
        __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            __UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
            __UpperCAmelCase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals
            __UpperCAmelCase = dummy_past_residuals[:]
            if time_step is None:
                __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residuals
                __UpperCAmelCase = dummy_past_residuals[:]
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def lowerCAmelCase_ (self ) -> List[str]:
        pass

    def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
        __UpperCAmelCase = dict(self.forward_default_kwargs )
        __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
        __UpperCAmelCase = self.dummy_sample
        __UpperCAmelCase = 0.1 * sample
        __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            __UpperCAmelCase = self.get_scheduler_config()
            __UpperCAmelCase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            __UpperCAmelCase = dummy_past_residuals[:]
            if time_step is None:
                __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residual (must be after setting timesteps)
                __UpperCAmelCase = dummy_past_residuals[:]
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]:
        __UpperCAmelCase = self.scheduler_classes[0]
        __UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
        __UpperCAmelCase = scheduler_class(**lowercase__ )
        __UpperCAmelCase = 10
        __UpperCAmelCase = self.dummy_model()
        __UpperCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase__ )
        for i, t in enumerate(scheduler.timesteps ):
            __UpperCAmelCase = model(lowercase__ , lowercase__ )
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            __UpperCAmelCase = model(lowercase__ , lowercase__ )
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
        return sample

    def lowerCAmelCase_ (self ) -> Optional[Any]:
        __UpperCAmelCase = dict(self.forward_default_kwargs )
        __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
        for scheduler_class in self.scheduler_classes:
            __UpperCAmelCase = self.get_scheduler_config()
            __UpperCAmelCase = scheduler_class(**lowercase__ )
            __UpperCAmelCase = self.dummy_sample
            __UpperCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ):
                scheduler.set_timesteps(lowercase__ )
            elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ):
                __UpperCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            __UpperCAmelCase = dummy_past_residuals[:]
            __UpperCAmelCase = scheduler.timesteps[5]
            __UpperCAmelCase = scheduler.timesteps[6]
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def lowerCAmelCase_ (self ) -> List[Any]:
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ )

    def lowerCAmelCase_ (self ) -> Union[str, Any]:
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ )

    def lowerCAmelCase_ (self ) -> str:
        __UpperCAmelCase = self.full_loop()
        __UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
```

Sample 2 · label = 0
Sample 3 · code (code_codestyle = 112):

```python
'''simple docstring'''

import sys
import turtle


def lowerCAmelCase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Tuple ):
    return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2


def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Any , ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )
    triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )
    triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    UpperCamelCase__ : Optional[Any] = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    UpperCamelCase__ : Optional[Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
```
Sample 3 · style_context (style_context_codestyle = 333):

```python
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class A_ :
    '''simple docstring'''

    def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
        __UpperCAmelCase = parent
        __UpperCAmelCase = batch_size
        __UpperCAmelCase = num_channels
        __UpperCAmelCase = is_training
        __UpperCAmelCase = use_labels
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = num_labels
        __UpperCAmelCase = image_size
        __UpperCAmelCase = layer_depths
        __UpperCAmelCase = embed_dims

    def lowerCAmelCase_ (self ) -> str:
        __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCAmelCase = None
        if self.use_labels:
            __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        __UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase_ (self ) -> Optional[Any]:
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )

    def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
        __UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __UpperCAmelCase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
        __UpperCAmelCase = self.num_labels
        __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCAmelCase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
        __UpperCAmelCase = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class A_ ( _a , _a , unittest.TestCase ):
    '''simple docstring'''

    a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    a__ = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def lowerCAmelCase_ (self ) -> List[str]:
        __UpperCAmelCase = SwiftFormerModelTester(self )
        __UpperCAmelCase = ConfigTester(
            self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def lowerCAmelCase_ (self ) -> Dict:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def lowerCAmelCase_ (self ) -> List[Any]:
        pass

    def lowerCAmelCase_ (self ) -> Any:
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase = model_class(lowercase__ )
            __UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase = model_class(lowercase__ )
            __UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase = [*signature.parameters.keys()]
            __UpperCAmelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , lowercase__ )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )

    @slow
    def lowerCAmelCase_ (self ) -> Any:
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )

    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def lowerCAmelCase_ (self ) -> List[str]:
        pass

    def lowerCAmelCase_ (self ) -> Union[str, Any]:
        def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
            __UpperCAmelCase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
            __UpperCAmelCase = outputs.hidden_states
            __UpperCAmelCase = 8
            self.assertEqual(len(lowercase__ ) , lowercase__ )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(lowercase__ ) ):
                self.assertEqual(
                    hidden_states[i].shape ,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ) , )

        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase = True
            check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase = True
            check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )

    def lowerCAmelCase_ (self ) -> Tuple:
        def _config_zero_init(lowercase__ ):
            __UpperCAmelCase = copy.deepcopy(lowercase__ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowercase__ , lowercase__ , 1E-10 )
                if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
                    __UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
                    setattr(lowercase__ , lowercase__ , lowercase__ )
            return configs_no_init

        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase = _config_zero_init(lowercase__ )
        for model_class in self.all_model_classes:
            __UpperCAmelCase = model_class(config=lowercase__ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item() ,
                        [0.0, 1.0] ,
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowerCAmelCase_ (self ) -> Optional[Any]:
        pass


def __a ( ) -> Any:
    '''simple docstring'''
    __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def lowerCAmelCase_ (self ) -> str:
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None

    @slow
    def lowerCAmelCase_ (self ) -> Tuple:
        __UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
        __UpperCAmelCase = self.default_image_processor
        __UpperCAmelCase = prepare_img()
        __UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase = model(**lowercase__ )
        # verify the logits
        __UpperCAmelCase = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowercase__ )
        __UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
```

Sample 3 · label = 0
Sample 4 · code (code_codestyle = 87):

```python
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class snake_case_ ( _a ):
    __A : int = "facebook/bart-large-mnli"
    __A : str = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    __A : List[Any] = "text_classifier"
    __A : str = AutoTokenizer
    __A : Any = AutoModelForSequenceClassification
    __A : Optional[Any] = ["text", ["text"]]
    __A : str = ["text"]

    def __UpperCamelCase ( self : Optional[int] ) -> str:
        super().setup()
        lowercase__ : Any = self.model.config
        lowercase__ : Union[str, Any] = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith("entail" ):
                lowercase__ : str = int(lowercase__ )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : List[str] ) -> Any:
        lowercase__ : List[Any] = labels
        return self.pre_processor(
            [text] * len(lowercase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
        lowercase__ : List[str] = outputs.logits
        lowercase__ : Union[str, Any] = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
```
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A_ : str = logging.get_logger(__name__) A_ : str = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Union[str, Any] = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) A_ : Dict = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), 
] ) A_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) A_ : Tuple = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) A_ : int = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) A_ : Tuple = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) A_ : Tuple = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) A_ : int = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) A_ : Tuple = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( 
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A_ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModel) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A_ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A_ : int = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class A_ ( 
_BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A_ : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
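# Usage sketch for the auto classes defined above ("roberta-base" is only an
# illustrative checkpoint, and the snippet assumes a working `transformers` +
# Flax environment):
#
#     from transformers import FlaxAutoModelForSequenceClassification
#     model = FlaxAutoModelForSequenceClassification.from_pretrained("roberta-base")
#
# `from_pretrained` reads the checkpoint's config class and dispatches through
# FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING above, so `model` comes back
# as a FlaxRobertaForSequenceClassification without naming that class explicitly.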
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class UpperCAmelCase_ ( _a ): '''simple docstring''' def __init__( self , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = params __SCREAMING_SNAKE_CASE = np.array(lowercase__ ) __SCREAMING_SNAKE_CASE = np.array([len(lowercase__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , _A ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def _A ( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.params.max_model_input_size __SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(f"""Splitting {sum(lowercase__ )} too long sequences.""" ) def divide_chunks(_A , _A ): return [l[i : i + n] for i in range(0 , len(lowercase__ ) , lowercase__ )] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] if self.params.mlm: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __SCREAMING_SNAKE_CASE = np.insert(lowercase__ , 0 , lowercase__ ) if sub_s[-1] != sep_id: __SCREAMING_SNAKE_CASE = np.insert(lowercase__ , len(lowercase__ ) , lowercase__ ) assert len(lowercase__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowercase__ ) new_tok_ids.extend(lowercase__ ) new_lengths.extend([len(lowercase__ ) for l in sub_seqs] ) __SCREAMING_SNAKE_CASE = np.array(lowercase__ ) __SCREAMING_SNAKE_CASE = np.array(lowercase__ ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(self ) __SCREAMING_SNAKE_CASE = self.lengths > 11 __SCREAMING_SNAKE_CASE = self.token_ids[indices] __SCREAMING_SNAKE_CASE = self.lengths[indices] __SCREAMING_SNAKE_CASE = len(self ) logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def _A ( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __SCREAMING_SNAKE_CASE = self.params.special_tok_ids['unk_token'] __SCREAMING_SNAKE_CASE = len(self ) __SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 __SCREAMING_SNAKE_CASE = self.token_ids[indices] __SCREAMING_SNAKE_CASE = self.lengths[indices] __SCREAMING_SNAKE_CASE = len(self ) logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def _A ( self ): '''simple docstring''' if not self.params.is_master: return logger.info(f"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = 
self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [t[0] for t in batch] __SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(lowercase__ ) == len(lowercase__ ) # Max for paddings __SCREAMING_SNAKE_CASE = max(lowercase__ ) # Pad token ids if self.params.mlm: __SCREAMING_SNAKE_CASE = self.params.special_tok_ids['pad_token'] else: __SCREAMING_SNAKE_CASE = self.params.special_tok_ids['unk_token'] __SCREAMING_SNAKE_CASE = [list(t.astype(lowercase__ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase__ )) for t in token_ids] assert len(tk_ ) == len(lowercase__ ) assert all(len(lowercase__ ) == max_seq_len_ for t in tk_ ) __SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) __SCREAMING_SNAKE_CASE = torch.tensor(lowercase__ ) # (bs) return tk_t, lg_t
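# A minimal construction sketch for the dataset above. The attribute names on
# `params` are inferred from the code; the class and its collate method keep
# their presumed original names from the DistilBERT example
# (`LmSeqsDataset.batch_sequences`), since the renaming in this dump makes the
# methods shadow one another -- treat both names as hypothetical here.
from types import SimpleNamespace

import numpy as np

params = SimpleNamespace(
    max_model_input_size=32,
    mlm=True,
    special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2, "unk_token": 3},
    is_master=True,
)
# Sequences must start with cls_token, end with sep_token, and be longer than
# 11 tokens to survive the short-sequence filter.
data = [np.array([0] + [5] * 14 + [1], dtype=np.int64) for _ in range(4)]
dataset = LmSeqsDataset(params, data)                        # hypothetical name
token_ids, lengths = dataset.batch_sequences(list(dataset))  # padded (bs, max_seq_len)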
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging A_ : Tuple = logging.get_logger(__name__) class A_ ( _a ): '''simple docstring''' a__ = "linear" a__ = "cosine" a__ = "cosine_with_restarts" a__ = "polynomial" a__ = "constant" a__ = "constant_with_warmup" a__ = "piecewise_constant" def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple: '''simple docstring''' return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) ) return 1.0 return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = {} __UpperCAmelCase = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: __UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' ) __UpperCAmelCase = int(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = float(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = value __UpperCAmelCase = float(rule_list[-1] ) def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): def rule_func(SCREAMING_SNAKE_CASE ) -> float: __UpperCAmelCase = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 
* (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]: '''simple docstring''' __UpperCAmelCase = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' ) def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __UpperCAmelCase = lr_init - lr_end __UpperCAmelCase = num_training_steps - num_warmup_steps __UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps __UpperCAmelCase = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
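# Usage sketch for the factory at the bottom of the file. This appears to be
# the diffusers optimization module, whose public entry point is
# `diffusers.optimization.get_scheduler`; every builder above is renamed `__a`
# in this dump, so the sketch imports the real entry point instead of calling
# this file directly (assumes `diffusers` and `torch` are installed).
import torch
from torch.optim import AdamW

from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 2)
optimizer = AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()    # lr ramps from 0 to 1e-3 over the first 10 steps,
    scheduler.step()    # then decays linearly back to 0 by step 100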
# Lint as: python3 import itertools import os import re UpperCAmelCase : Optional[int] = re.compile(r"([A-Z]+)([A-Z][a-z])") UpperCAmelCase : Union[str, Any] = re.compile(r"([a-z\d])([A-Z])") UpperCAmelCase : Optional[Any] = re.compile(r"(?<!_)_(?!_)") UpperCAmelCase : Optional[Any] = re.compile(r"(_{2,})") UpperCAmelCase : List[Any] = R'^\w+(\.\w+)*$' UpperCAmelCase : str = R'<>:/\|?*' def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] ): '''simple docstring''' lowerCamelCase = _uppercase_uppercase_re.sub(R"""\1_\2""" , lowerCamelCase__ ) lowerCamelCase = _lowercase_uppercase_re.sub(R"""\1_\2""" , lowerCamelCase__ ) return name.lower() def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' lowerCamelCase = _single_underscore_re.split(lowerCamelCase__ ) lowerCamelCase = [_multiple_underscores_re.split(lowerCamelCase__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase__ ) if n != """""" ) def __lowerCamelCase ( lowerCamelCase__ : Dict ): '''simple docstring''' if os.path.basename(lowerCamelCase__ ) != name: raise ValueError(f'Should be a dataset name, not a path: {name}' ) return camelcase_to_snakecase(lowerCamelCase__ ) def __lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] ): '''simple docstring''' if os.path.basename(lowerCamelCase__ ) != name: raise ValueError(f'Should be a dataset name, not a path: {name}' ) if not re.match(_split_re , lowerCamelCase__ ): raise ValueError(f'Split name should match \'{_split_re}\' but got \'{split}\'.' ) return f'{filename_prefix_for_name(lowerCamelCase__ )}-{split}' def __lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any]=None ): '''simple docstring''' lowerCamelCase = filename_prefix_for_split(lowerCamelCase__ , lowerCamelCase__ ) if filetype_suffix: prefix += f'.{filetype_suffix}' lowerCamelCase = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) return f'{filepath}*' def __lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ): '''simple docstring''' lowerCamelCase = filename_prefix_for_split(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if shard_lengths: lowerCamelCase = len(lowerCamelCase__ ) lowerCamelCase = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(lowerCamelCase__ )] if filetype_suffix: lowerCamelCase = [filename + f'.{filetype_suffix}' for filename in filenames] return filenames else: lowerCamelCase = prefix if filetype_suffix: filename += f'.{filetype_suffix}' return [filename]
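# Worked examples for the helpers above, under their presumed original names
# from `datasets/naming.py` (here every helper is renamed `__lowerCamelCase`,
# so these calls are a sketch rather than directly runnable against this file):
#
#     camelcase_to_snakecase("SomeDatasetName")        # -> "some_dataset_name"
#     snakecase_to_camelcase("some_dataset_name")      # -> "SomeDatasetName"
#     filepattern_for_dataset_split("squad", "train", "/data", "arrow")
#                                                      # -> "/data/squad-train.arrow*"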
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: '''simple docstring''' __UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): __UpperCAmelCase = y_points[i] for i in range(2 , SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __UpperCAmelCase = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
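# Worked example (presumed original name `neville_interpolate`; note that in
# this dump the table assignment `q[i][1] = y_points[i]` has been renamed
# away, so the restored original is assumed). The four points sample y = x**2,
# and the degree-3 interpolant reproduces it exactly:
#
#     value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 3.5)
#     value  # -> 12.25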
def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[Any] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def snake_case_ ( lowerCAmelCase_ : List[Any] = 100 ): __lowercase : Optional[int] = 1 __lowercase : Dict = 2 for i in range(2 , max_n + 1 ): __lowercase : List[str] = pre_numerator __lowercase : List[str] = 2 * i // 3 if i % 3 == 0 else 1 __lowercase : Tuple = cur_numerator __lowercase : List[Any] = e_cont * pre_numerator + temp return sum_digits(lowerCAmelCase_ ) if __name__ == "__main__": print(f'''{solution() = }''')
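# Worked check, under the presumed original names (`sum_digits` and
# `solution`, both renamed `snake_case_` above, with the final call restored
# to `sum_digits(cur_numerator)`): the 10th convergent of e is 1457/536 and
# 1 + 4 + 5 + 7 = 17, so solution(10) == 17; solution(100) returns the
# Project Euler #65 answer, 272.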
def __a ( SCREAMING_SNAKE_CASE ) -> set: '''simple docstring''' __UpperCAmelCase = set() # edges = list of graph's edges __UpperCAmelCase = get_edges(SCREAMING_SNAKE_CASE ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: __UpperCAmelCase , __UpperCAmelCase = edges.pop() chosen_vertices.add(SCREAMING_SNAKE_CASE ) chosen_vertices.add(SCREAMING_SNAKE_CASE ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(SCREAMING_SNAKE_CASE ) return chosen_vertices def __a ( SCREAMING_SNAKE_CASE ) -> set: '''simple docstring''' __UpperCAmelCase = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
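# Worked example (presumed original names `matching_min_vertex_cover` and
# `get_edges`; both are renamed `__a` above, the second shadowing the first).
# Whichever order the set pops edges in, the returned endpoints of a maximal
# matching always cover every edge:
#
#     graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
#     cover = matching_min_vertex_cover(graph)
#     all(u in cover or v in cover for u, vs in graph.items() for v in vs)  # True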
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_A , _A ) def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = emb.weight.shape SCREAMING_SNAKE_CASE__ = nn.Linear(_A , _A , bias=_A ) SCREAMING_SNAKE_CASE__ = emb.weight.data return lin_layer def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location='''cpu''' ) SCREAMING_SNAKE_CASE__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] SCREAMING_SNAKE_CASE__ = mam_aaa['''model'''] remove_ignore_keys_(_A ) SCREAMING_SNAKE_CASE__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] SCREAMING_SNAKE_CASE__ = MaMaaaConfig( vocab_size=_A , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) SCREAMING_SNAKE_CASE__ = state_dict['''decoder.embed_tokens.weight'''] SCREAMING_SNAKE_CASE__ = MaMaaaForConditionalGeneration(_A ) model.model.load_state_dict(_A , strict=_A ) SCREAMING_SNAKE_CASE__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() _SCREAMING_SNAKE_CASE : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
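# Invocation sketch (the script filename is hypothetical; `model.pt` must be a
# fairseq M2M-100 checkpoint on local disk):
#
#     python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100-hf
#
# The script loads the fairseq state dict, drops the version/positional
# buffers listed in the ignore list, builds an equivalent M2M-100 config, and
# saves the converted MaMaaaForConditionalGeneration under ./m2m100-hf.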
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} A_ : int = ['a', 'b', 'c', 'd', 'e'] def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = start # add current to visited visited.append(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": A_ : Tuple = topological_sort('a', [], []) print(sort)
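# Behavioural note and worked trace: the DFS appends a vertex only after all
# of its out-neighbours, so `sort` comes back in reverse topological order
# (sinks first). For the graph above the printed list is
# ['c', 'd', 'e', 'b', 'a']; reverse it if callers expect sources such as 'a'
# to come first.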
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A_ = logging.get_logger(__name__) A_ = '▁' A_ = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } A_ = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } A_ = { 'facebook/s2t-small-librispeech-asr': 10_24, } A_ = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] A_ = {'mustc': MUSTC_LANGS} class lowercase( _a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = MAX_MODEL_INPUT_SIZES lowercase__ = ["input_ids", "attention_mask"] lowercase__ = [] def __init__( self: List[Any], a_: int, a_: List[Any], a_: Tuple="<s>", a_: Union[str, Any]="</s>", a_: Union[str, Any]="<pad>", a_: Dict="<unk>", a_: str=False, a_: List[Any]=False, a_: List[Any]=None, a_: str=None, a_: Union[str, Any] = None, **a_: Tuple, ): '''simple docstring''' _snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase__, eos_token=lowercase__, unk_token=lowercase__, pad_token=lowercase__, do_upper_case=lowercase__, do_lower_case=lowercase__, tgt_lang=lowercase__, lang_codes=lowercase__, sp_model_kwargs=self.sp_model_kwargs, **lowercase__, ) _snake_case : List[str] = do_upper_case _snake_case : Tuple = do_lower_case _snake_case : Optional[Any] = load_json(lowercase__ ) _snake_case : str = {v: k for k, v in self.encoder.items()} _snake_case : Any = spm_file _snake_case : Optional[Any] = load_spm(lowercase__, self.sp_model_kwargs ) if lang_codes is not None: _snake_case : List[str] = lang_codes _snake_case : List[str] = LANGUAGES[lang_codes] _snake_case : Tuple = [f"<lang:{lang}>" for lang in self.langs] _snake_case : int = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs} _snake_case : Tuple = self.lang_tokens _snake_case : Tuple = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: _snake_case : Dict = {} @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return len(self.encoder ) @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return self._tgt_lang @tgt_lang.setter def UpperCamelCase_ ( self: Optional[int], a_: str ): '''simple docstring''' _snake_case : List[Any] = new_tgt_lang self.set_tgt_lang_special_tokens(lowercase__ ) def UpperCamelCase_ ( self: Dict, a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self.lang_code_to_id[tgt_lang] _snake_case : Optional[Any] = [lang_code_id] def UpperCamelCase_ ( self: Any, a_: List[str] ): '''simple docstring''' return self.sp_model.encode(lowercase__, out_type=lowercase__ ) def UpperCamelCase_ ( self: Tuple, a_: Tuple ): '''simple docstring''' return self.encoder.get(lowercase__, self.encoder[self.unk_token] ) def UpperCamelCase_ ( self: Dict, a_: Tuple ): '''simple docstring''' return self.decoder.get(lowercase__, self.unk_token ) def UpperCamelCase_ ( self: Tuple, a_: int ): '''simple docstring''' _snake_case : Optional[int] = [] _snake_case : Tuple = """""" for token in tokens: # make sure that special tokens 
are not decoded using sentencepiece model if token in self.all_special_tokens: _snake_case : str = self.sp_model.decode(lowercase__ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " _snake_case : Optional[Any] = [] else: current_sub_tokens.append(lowercase__ ) _snake_case : Dict = self.sp_model.decode(lowercase__ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def UpperCamelCase_ ( self: int, a_: Dict, a_: str=None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def UpperCamelCase_ ( self: Any, a_: List[str], a_: List[Any] = None, a_: Dict = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase__, token_ids_a=lowercase__, already_has_special_tokens=lowercase__ ) _snake_case : Optional[Any] = [1] * len(self.prefix_tokens ) _snake_case : Any = [1] if token_ids_a is None: return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Any = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: List[str] ): '''simple docstring''' _snake_case : Any = self.__dict__.copy() _snake_case : str = None return state def __setstate__( self: Dict, a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = d # for backward compatibility if not hasattr(self, """sp_model_kwargs""" ): _snake_case : Tuple = {} _snake_case : int = load_spm(self.spm_file, self.sp_model_kwargs ) def UpperCamelCase_ ( self: Any, a_: List[Any], a_: Optional[Any] = None ): '''simple docstring''' _snake_case : Optional[int] = Path(lowercase__ ) assert save_dir.is_dir(), f"{save_directory} should be a directory" _snake_case : List[str] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) _snake_case : str = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder, lowercase__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file, lowercase__ ) elif not os.path.isfile(self.spm_file ): with open(lowercase__, """wb""" ) as fi: _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (str(lowercase__ ), str(lowercase__ )) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" _snake_case : int = sentencepiece.SentencePieceProcessor(**snake_case__ ) spm.Load(str(snake_case__ ) ) return spm def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" with open(snake_case__ , """r""" ) as f: return json.load(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any ): """simple docstring""" with open(snake_case__ , """w""" ) as f: json.dump(snake_case__ , snake_case__ , indent=2 )
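# Usage sketch (assumes the class above is the original Speech2TextTokenizer;
# the checkpoint name is illustrative and fetches a real SentencePiece model):
#
#     from transformers import Speech2TextTokenizer
#     tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tok("hello world").input_ids            # sentencepiece pieces + </s>
#     tok.decode(ids, skip_special_tokens=True)     # round-trips the text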
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ : int = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['PoolFormerFeatureExtractor'] a_ = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: '''simple docstring''' model.train() __UpperCAmelCase = model(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' set_seed(4_2 ) __UpperCAmelCase = RegressionModel() __UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) # Make a copy of `model` if sched: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' # Test when on a single CPU or GPU that the context manager does nothing __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" 
(noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' # Test on distributed setup that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = 
input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' __UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def __a ( ) -> str: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = 
RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase = RegressionDataset(length=9_6 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if iteration < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if batch_num < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
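# The training pattern these tests exercise, in isolation (a sketch; assumes
# the model, optimizer and dataloader have already gone through
# `accelerator.prepare`):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch["x"]), batch["y"])
#             accelerator.backward(loss)   # grads sync only on step boundaries
#             optimizer.step()
#             optimizer.zero_grad()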
def __magic_name__ ( __a : Tuple , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = len(__a ) UpperCamelCase__ = len(__a ) UpperCamelCase__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] UpperCamelCase__ = True for i in range(__a ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: UpperCamelCase__ = True if a[i].islower(): UpperCamelCase__ = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
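# Worked example (presumed original signature `abbreviation(a, b)`: can `a`
# be turned into `b` by upper-casing a subset of its lowercase letters and
# deleting the remaining lowercase ones?):
#
#     abbreviation("daBcd", "ABC")   # True: drop the first d, upper-case a and c
#     abbreviation("AbcDE", "AFDE")  # False: no F can ever be produced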
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore A_ : Optional[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" A_ : Optional[Any] = [file for file in filepaths if file != file.lower()] if upper_files: print(F"""{len(upper_files)} files contain uppercase characters:""") print('\n'.join(upper_files) + '\n') A_ : Tuple = [file for file in filepaths if ' ' in file] if space_files: print(F"""{len(space_files)} files contain space characters:""") print('\n'.join(space_files) + '\n') A_ : str = [file for file in filepaths if '-' in file] if hyphen_files: print(F"""{len(hyphen_files)} files contain hyphen characters:""") print('\n'.join(hyphen_files) + '\n') A_ : Optional[Any] = [file for file in filepaths if os.sep not in file] if nodir_files: print(F"""{len(nodir_files)} files are not in a directory:""") print('\n'.join(nodir_files) + '\n') A_ : Union[str, Any] = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __A : str = logging.get_logger(__name__) __A : str = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) __A : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) __A : Union[str, Any] = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) __A : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', 
'''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) __A : Optional[int] = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) __A : Dict = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) __A : List[str] = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) __A : Tuple = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) __A : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) __A : int = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) __A : Tuple = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), 
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) __A : Tuple = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) __A : int = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) __A : Tuple = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) __A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __A : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __A : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __A : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __A : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __A : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __A : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __A : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __A : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Union[str, Any] = FLAX_MODEL_MAPPING __A : Tuple = auto_class_update(FlaxAutoModel) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING __A : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Any = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __A : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING __A : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : Union[str, Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __A : Any = 
auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Dict = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Tuple = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __A : Tuple = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __A : int = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __A : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class __A ( _BaseAutoModelClass ): lowerCAmelCase_ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __A : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
138
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the character of a string by placing each of them in a grid
    (the height is dependent on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with the
    characters of the input string, then reads the zigzag formation back off."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
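# Round-trip sanity check for the rail fence functions above (illustrative
# plaintext and key; with key=4 the zigzag rows read off as "HWE OLORDLL"):
assert encrypt("HELLO WORLD", 4) == "HWE OLORDLL"
assert decrypt(encrypt("HELLO WORLD", 4), 4) == "HELLO WORLD"
assert "HELLO WORLD" in bruteforce(encrypt("HELLO WORLD", 4)).values()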
333
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : int = 3_8_4 if "tiny" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 9, 3] __UpperCAmelCase : Optional[int] = [9_6, 1_9_2, 3_8_4, 7_6_8] if "small" in model_name: __UpperCAmelCase : int = [3, 3, 2_7, 3] __UpperCAmelCase : int = [9_6, 1_9_2, 3_8_4, 7_6_8] if "base" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 2_7, 3] __UpperCAmelCase : Optional[int] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4] __UpperCAmelCase : int = 5_1_2 if "large" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 2_7, 3] __UpperCAmelCase : Tuple = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6] __UpperCAmelCase : str = 7_6_8 if "xlarge" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 2_7, 3] __UpperCAmelCase : Union[str, Any] = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] __UpperCAmelCase : Optional[Any] = 1_0_2_4 # set label information __UpperCAmelCase : Union[str, Any] = 1_5_0 __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Union[str, Any] = """ade20k-id2label.json""" __UpperCAmelCase : str = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : List[str] = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Any = {v: k for k, v in idalabel.items()} __UpperCAmelCase : Optional[Any] = ConvNextConfig( depths=_UpperCamelCase , hidden_sizes=_UpperCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) __UpperCAmelCase : List[str] = UperNetConfig( backbone_config=_UpperCamelCase , auxiliary_in_channels=_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase , ) return config def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = [] # fmt: off # stem rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") ) rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) 
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Tuple ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = dct.pop(_UpperCamelCase ) __UpperCAmelCase : str = val def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = { """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""", """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""", """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""", """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""", """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""", } __UpperCAmelCase : List[str] = model_name_to_url[model_name] __UpperCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" )["""state_dict"""] __UpperCAmelCase : str = get_upernet_config(_UpperCamelCase ) __UpperCAmelCase : Tuple = UperNetForSemanticSegmentation(_UpperCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : Tuple = state_dict.pop(_UpperCamelCase ) if "bn" in key: __UpperCAmelCase : Dict = 
key.replace("""bn""" , """batch_norm""" ) __UpperCAmelCase : int = val # rename keys __UpperCAmelCase : List[str] = create_rename_keys(_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) # verify on image __UpperCAmelCase : Union[str, Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" __UpperCAmelCase : Union[str, Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert("""RGB""" ) __UpperCAmelCase : Union[str, Any] = SegformerImageProcessor() __UpperCAmelCase : int = processor(_UpperCamelCase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(_UpperCamelCase ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Tuple = torch.tensor( [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Tuple = torch.tensor( [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Optional[int] = torch.tensor( [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F"upernet-convnext-{size}" for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase : Tuple = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
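# Once converted, the checkpoint behaves like any other UperNet model. A usage
# sketch (the dump path is whatever was passed to --pytorch_dump_folder_path):
# from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation
# model = UperNetForSemanticSegmentation.from_pretrained("./upernet-convnext-tiny")
# processor = SegformerImageProcessor.from_pretrained("./upernet-convnext-tiny")
# inputs = processor(image, return_tensors="pt")
# logits = model(**inputs).logits      # (batch, 150, h, w) over the ADE20k classes
# pred = logits.argmax(dim=1)[0]       # per-pixel class map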
115
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
333
0
"""Build your own knowledge dataset for RAG: split documents into passages,
embed them with a DPR context encoder, and index them with Faiss."""

import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
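# Retrieval sketch once the dataset and index above exist. This part is an
# assumption-laden illustration, not part of the script: it pairs the saved
# context-encoder index with a DPR *question* encoder for nearest-neighbor
# lookup via `datasets`' get_nearest_examples API.
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
# ds = load_from_disk(passages_path)
# ds.load_faiss_index("embeddings", index_path)
# q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_emb = q_enc(**q_tok("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output
# scores, passages = ds.get_nearest_examples("embeddings", q_emb.detach().numpy()[0], k=5)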
112
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
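# Usage sketch (downloads the tokenizer files for a real BLOOM checkpoint):
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# tok("Hello world")["input_ids"]
# With add_prefix_space=True the tokenizer also accepts pre-tokenized input:
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
# tok(["Hello", "world"], is_split_into_words=True)["input_ids"]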
333
0
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the cocktail shaker (bidirectional bubble)
    sort and return it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
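# Worked example, run as a light self-check on import (values are arbitrary).
# Each outer iteration makes one backward and one forward pass, so an
# already-sorted input exits after a single pass thanks to the `swapped` flag.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]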
87
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
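# Spot checks for the DP above: 12 = 4 + 4 + 4 needs three squares, while
# 13 = 9 + 4 needs only two.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2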
333
0
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, "  \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))


disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
257
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale to [0, 2] and shift to [-1, 1], the range ImageGPT expects.
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
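# Demo: quantize a random RGB image into a toy 8-colour palette. The real
# ImageGPT checkpoints ship 512 learned clusters in [-1, 1]; the random
# palette below is purely illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    toy_clusters = rng.uniform(-1, 1, size=(8, 3))
    processor = ImageGPTImageProcessor(clusters=toy_clusters, size={"height": 32, "width": 32})
    image = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
    encoding = processor(image, return_tensors="np")
    print(encoding["input_ids"].shape)  # (1, 1024): one palette index per pixel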
333
0
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python implementation of the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with 0x80, zeros, and the 64-bit big-endian message length so the
        # result is a multiple of 64 bytes.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned number by the given amount."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Provides the option to hash a string or the contents of a file and
    prints the calculated SHA-256 hash."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
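# Cross-check the pure-Python implementation against hashlib on a few inputs
# (the empty string exercises the padding path with zero message bytes):
import hashlib

for sample in (b"", b"abc", b"Hello World!! Welcome to Cryptography"):
    assert SHA256(sample).hash == hashlib.sha256(sample).hexdigest()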
252
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
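# With the lazy module in place, downstream imports stay cheap: the heavy
# submodules are only loaded on first attribute access, e.g. (sketch):
# from transformers import PoolFormerConfig, PoolFormerForImageClassification
# model = PoolFormerForImageClassification(PoolFormerConfig())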
333
0
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix from the given file, moving
    left to right while also being allowed to step up and down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First pretend the path only moves right, ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ... then relax column j by allowing downward moves, ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ... and finally by allowing upward moves.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
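# Worked example: the 5x5 grid from the Project Euler 82 statement,
#     131 673 234 103  18
#     201  96 342 965 150
#     630 803 746 422 111
#     537 699 497 121 956
#     805 732 524  37 331
# has minimal left-to-right path sum 994 (201 -> 96 -> 342 -> 234 -> 103 -> 18,
# stepping up one row in the third column). Saving it comma-separated as
# "example.txt" next to this script and calling solution("example.txt")
# returns 994.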
233
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
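# Example: a 100 VA load at power factor 0.8 splits into 80 W real power and
# 60 var reactive power (the classic 3-4-5 power triangle):
assert round(real_power(100, 0.8), 10) == 80.0
assert round(reactive_power(100, 0.8), 10) == 60.0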
333
0
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California housing dataset is used to demonstrate the XGBoost regressor.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
314
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid whose rows and columns are sorted in
    decreasing order."""
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in
    decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a
    decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with a binary search per row; since columns are also
    sorted, the search bound can only shrink from one row to the next."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by iterating over every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
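# --- sanity check (illustrative, not part of the original file) ---
# All three strategies must agree on a small test grid; this one is the
# first entry of test_grids above and contains 8 negative numbers.
_sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(_sample)
    == count_negatives_brute_force(_sample)
    == count_negatives_brute_force_with_break(_sample)
    == 8
)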
333
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging A_ = logging.get_logger(__name__) A_ = { 'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': ( 'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class lowercase( _a ): '''simple docstring''' lowercase__ = "trajectory_transformer" lowercase__ = ["past_key_values"] lowercase__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self: int, a_: Dict=100, a_: List[Any]=5, a_: Any=1, a_: Optional[int]=1, a_: Any=249, a_: Union[str, Any]=6, a_: Dict=17, a_: Optional[int]=25, a_: Optional[Any]=4, a_: Optional[Any]=4, a_: Optional[Any]=128, a_: Any=0.1, a_: int=0.1, a_: List[str]=0.1, a_: Any=0.0_006, a_: str=512, a_: Any=0.02, a_: Any=1E-12, a_: int=1, a_: str=True, a_: Dict=1, a_: int=50_256, a_: List[str]=50_256, **a_: Tuple, ): '''simple docstring''' _snake_case : Any = vocab_size _snake_case : Optional[int] = action_weight _snake_case : str = reward_weight _snake_case : List[Any] = value_weight _snake_case : List[Any] = max_position_embeddings _snake_case : str = block_size _snake_case : Optional[Any] = action_dim _snake_case : List[str] = observation_dim _snake_case : int = transition_dim _snake_case : List[str] = learning_rate _snake_case : Union[str, Any] = n_layer _snake_case : int = n_head _snake_case : str = n_embd _snake_case : Dict = embd_pdrop _snake_case : Dict = attn_pdrop _snake_case : List[Any] = resid_pdrop _snake_case : int = initializer_range _snake_case : int = layer_norm_eps _snake_case : Any = kaiming_initializer_range _snake_case : Optional[int] = use_cache super().__init__(pad_token_id=lowercase__, bos_token_id=lowercase__, eos_token_id=lowercase__, **lowercase__ )
64
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 A_ : List[str] = sys.version_info >= (3, 10) def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str: '''simple docstring''' return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = 42 a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field(default="toto" , metadata={"help": "help message"} ) @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = BasicEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) @dataclass class A_ : '''simple docstring''' a__ = list_field(default=[] ) a__ = list_field(default=[1, 2, 3] ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) a__ = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A_ : '''simple docstring''' a__ = field() a__ = field() a__ = field() def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field() a__ = None a__ = field(default="toto" , metadata={"help": "help message"} ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) class A_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , lowercase__ ) and yy.get('''choices''' , lowercase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](lowercase__ ) , yy['''type'''](lowercase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) 
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ ) self.assertFalse(example.flag ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) __UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) 
self.assertEqual(args.foo , 42 ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase_ (self ) -> str: @dataclass class A_ : '''simple docstring''' a__ = "toto" __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual( lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) __UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase_ (self ) -> List[str]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' ) expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ ) __UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) ) __UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) 
self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } __UpperCAmelCase = parser.parse_dict(lowercase__ )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ ) def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> List[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.assertIsNotNone(lowercase__ )
333
0
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): while second != 0: __lowercase : Tuple = first & second first ^= second __lowercase : Dict = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() a_ = int(input('Enter the first number: ').strip()) a_ = int(input('Enter the second number: ').strip()) print(F"{add(first, second) = }")
249
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Circular (modulo-N) convolution of two signals via the circulant-matrix
    method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # build the circulant matrix from rotations of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
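# --- cross-check sketch (an assumption, not part of the original class) ---
# Circular convolution is element-wise multiplication in the frequency
# domain, so an FFT round-trip should reproduce the matrix-based result
# for the default signals above.
_first = [2, 1, 2, -1]
_second = [1, 2, 3, 4]
_via_fft = np.real(np.fft.ifft(np.fft.fft(_first) * np.fft.fft(_second)))
print([round(x, 2) for x in _via_fft])  # [10.0, 10.0, 6.0, 14.0]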
333
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class __A( _a ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """pegasus""" SCREAMING_SNAKE_CASE__ = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self , SCREAMING_SNAKE_CASE_=5_02_65 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = vocab_size UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = d_model UpperCamelCase__ = encoder_ffn_dim UpperCamelCase__ = encoder_layers UpperCamelCase__ = encoder_attention_heads UpperCamelCase__ = decoder_ffn_dim UpperCamelCase__ = decoder_layers UpperCamelCase__ = decoder_attention_heads UpperCamelCase__ = dropout UpperCamelCase__ = attention_dropout UpperCamelCase__ = activation_dropout UpperCamelCase__ = activation_function UpperCamelCase__ = init_std UpperCamelCase__ = encoder_layerdrop UpperCamelCase__ = decoder_layerdrop UpperCamelCase__ = use_cache UpperCamelCase__ = encoder_layers UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , ) @property def UpperCAmelCase_ (self ): return self.encoder_attention_heads @property def UpperCAmelCase_ (self ): return self.d_model
244
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[Any] = { 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class A_ ( _a ): '''simple docstring''' a__ = "pegasus" a__ = ["past_key_values"] a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__(self , lowercase__=50_265 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0 , lowercase__=False , lowercase__=0 , lowercase__=1 , lowercase__=1 , **lowercase__ , ) -> str: __UpperCAmelCase = vocab_size __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = d_model __UpperCAmelCase = encoder_ffn_dim __UpperCAmelCase = encoder_layers __UpperCAmelCase = encoder_attention_heads __UpperCAmelCase = decoder_ffn_dim __UpperCAmelCase = decoder_layers __UpperCAmelCase = decoder_attention_heads __UpperCAmelCase = dropout __UpperCAmelCase = attention_dropout __UpperCAmelCase = activation_dropout __UpperCAmelCase = activation_function __UpperCAmelCase = init_std __UpperCAmelCase = encoder_layerdrop __UpperCAmelCase = decoder_layerdrop __UpperCAmelCase = use_cache __UpperCAmelCase = encoder_layers __UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , ) @property def lowerCAmelCase_ (self ) -> int: return self.encoder_attention_heads @property def lowerCAmelCase_ (self ) -> int: return self.d_model
333
0
from torch import nn


class ClassificationHead(nn.Module):
    """A single-layer classification head on top of a hidden representation."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
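# --- minimal usage sketch (sizes below are illustrative assumptions) ---
import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(4, 768)  # e.g. four pooled hidden states
logits = head(hidden)
print(logits.shape)  # torch.Size([4, 5])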
138
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( _a , unittest.TestCase ): '''simple docstring''' a__ = LongformerTokenizer a__ = True a__ = LongformerTokenizerFast a__ = True def lowerCAmelCase_ (self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __UpperCAmelCase = {'''unk_token''': '''<unk>'''} __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def lowerCAmelCase_ (self , **lowercase__ ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ ) -> Dict: __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = '''lower newer''' return input_text, output_text def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True) self.assertListEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokens + [tokenizer.unk_token] __UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = '''Encode this sequence.''' __UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase__ , lowercase__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) # Testing spaces after special tokens __UpperCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space __UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ ) __UpperCAmelCase = '''Encode <mask> sequence''' __UpperCAmelCase = '''Encode <mask>sequence''' __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: pass def lowerCAmelCase_ (self ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) __UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) # token_type_ids should put 0 everywhere 
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def lowerCAmelCase_ (self ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}''' __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
333
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : Tuple = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class lowerCamelCase__ ( _a ): """simple docstring""" __a = """open-llama""" def __init__( self : str , UpperCamelCase : List[Any]=100_000 , UpperCamelCase : List[Any]=4_096 , UpperCamelCase : Tuple=11_008 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : int=32 , UpperCamelCase : str="silu" , UpperCamelCase : List[Any]=2_048 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Optional[int]=1e-6 , UpperCamelCase : Dict=True , UpperCamelCase : Dict=0 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Any=False , UpperCamelCase : Tuple=True , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict=True , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Optional[Any] = num_attention_heads __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : int = initializer_range __UpperCAmelCase : Optional[int] = rms_norm_eps __UpperCAmelCase : Any = use_cache __UpperCAmelCase : Optional[Any] = kwargs.pop( """use_memorry_efficient_attention""" , lowercase__ ) __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_dropout_prob __UpperCAmelCase : List[str] = use_stable_embedding __UpperCAmelCase : List[str] = shared_input_output_embedding __UpperCAmelCase : Union[str, Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , tie_word_embeddings=lowercase__ , **lowercase__ , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowercase__ ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) __UpperCAmelCase : Optional[int] = self.rope_scaling.get("""type""" , lowercase__ ) __UpperCAmelCase : Any = self.rope_scaling.get("""factor""" , lowercase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowercase__ , lowercase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
115
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ ( _a ): '''simple docstring''' a__ = (IPNDMScheduler,) a__ = (("num_inference_steps", 50),) def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple: __UpperCAmelCase = {'''num_train_timesteps''': 1_000} config.update(**lowercase__ ) return config def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config(**lowercase__ ) __UpperCAmelCase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[:] if time_step is None: __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ ) new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase_ (self ) -> List[str]: pass def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[:] if time_step is None: __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residual (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = 
new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]: __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(**lowercase__ ) __UpperCAmelCase = scheduler_class(**lowercase__ ) __UpperCAmelCase = 10 __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(lowercase__ , lowercase__ ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(lowercase__ , lowercase__ ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample return sample def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ): scheduler.set_timesteps(lowercase__ ) elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ): __UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.timesteps[5] __UpperCAmelCase = scheduler.timesteps[6] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase_ (self ) -> List[Any]: for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ ) def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = self.full_loop() __UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
333
0
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCAmelCase_ ( _lowerCamelCase: Dict , _lowerCamelCase: Tuple ): if "xprophetnet" in prophetnet_checkpoint_path: __SCREAMING_SNAKE_CASE : Union[str, Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCamelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = XLMProphetNetForConditionalGeneration.from_pretrained( _lowerCamelCase , output_loading_info=_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE : str = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCamelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = ProphetNetForConditionalGeneration.from_pretrained( _lowerCamelCase , output_loading_info=_lowerCamelCase ) __SCREAMING_SNAKE_CASE : str = ["""key_proj""", """value_proj""", """query_proj"""] __SCREAMING_SNAKE_CASE : Union[str, Any] = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: __SCREAMING_SNAKE_CASE : Any = key.split(""".""" ) if attributes[0] == "lm_head": __SCREAMING_SNAKE_CASE : Tuple = prophet __SCREAMING_SNAKE_CASE : Optional[Any] = prophet_old else: __SCREAMING_SNAKE_CASE : Union[str, Any] = prophet.prophetnet __SCREAMING_SNAKE_CASE : List[Any] = prophet_old.model __SCREAMING_SNAKE_CASE : List[Any] = False for attribute in attributes: if attribute in mapping: __SCREAMING_SNAKE_CASE : str = mapping[attribute] if not hasattr(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = attribute elif hasattr(_lowerCamelCase , _lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[str] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __SCREAMING_SNAKE_CASE : Optional[Any] = old_model.weight logger.info(F"{attribute} is initialized." ) __SCREAMING_SNAKE_CASE : Optional[int] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__SCREAMING_SNAKE_CASE : List[str] = old_model.bias logger.info(F"{attribute} is initialized" ) __SCREAMING_SNAKE_CASE : List[str] = True break elif attribute in special_keys and hasattr(_lowerCamelCase , """in_proj_weight""" ): __SCREAMING_SNAKE_CASE : Dict = old_model.in_proj_weight.shape[0] // 3 __SCREAMING_SNAKE_CASE : Any = getattr(_lowerCamelCase , _lowerCamelCase ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __SCREAMING_SNAKE_CASE : Any = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __SCREAMING_SNAKE_CASE : Any = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __SCREAMING_SNAKE_CASE : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __SCREAMING_SNAKE_CASE : Optional[Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." __SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) __SCREAMING_SNAKE_CASE : Any = True break if attribute.isdigit(): __SCREAMING_SNAKE_CASE : List[str] = model[int(_lowerCamelCase )] __SCREAMING_SNAKE_CASE : str = old_model[int(_lowerCamelCase )] else: __SCREAMING_SNAKE_CASE : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if old_attribute == "": __SCREAMING_SNAKE_CASE : Optional[int] = old_model else: if not hasattr(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"{old_model} does not have {old_attribute}" ) __SCREAMING_SNAKE_CASE : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase ) if not is_key_init: raise ValueError(F"{key} was not correctly initialized!" ) print(F"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase__ : int = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
112
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : '''simple docstring''' def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int: __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = num_channels __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = num_labels __UpperCAmelCase = image_size __UpperCAmelCase = layer_depths __UpperCAmelCase = embed_dims def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ (self ) -> Optional[Any]: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int: __UpperCAmelCase = SwiftFormerModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]: __UpperCAmelCase = self.num_labels __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = model(lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = model(lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ (self ) -> Optional[int]: ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs() __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): '''simple docstring''' a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if 
is_torch_available() else () a__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def lowerCAmelCase_ (self ) -> List[str]: __UpperCAmelCase = SwiftFormerModelTester(self ) __UpperCAmelCase = ConfigTester( self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def lowerCAmelCase_ (self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def lowerCAmelCase_ (self ) -> List[Any]: pass def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(lowercase__ ) __UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(lowercase__ ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase__ ) @slow def lowerCAmelCase_ (self ) -> Any: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def lowerCAmelCase_ (self ) -> List[str]: pass def lowerCAmelCase_ (self ) -> Union[str, Any]: def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ): __UpperCAmelCase = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = 8 self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowercase__ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: def _config_zero_init(lowercase__ 
): __UpperCAmelCase = copy.deepcopy(lowercase__ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowercase__ , lowercase__ , 1E-10 ) if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ): __UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) ) setattr(lowercase__ , lowercase__ , lowercase__ ) return configs_no_init __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = _config_zero_init(lowercase__ ) for model_class in self.all_model_classes: __UpperCAmelCase = model_class(config=lowercase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCAmelCase_ (self ) -> Optional[Any]: pass def __a ( ) -> Any: '''simple docstring''' __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ (self ) -> str: return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ ) __UpperCAmelCase = self.default_image_processor __UpperCAmelCase = prepare_img() __UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ ) # forward pass with torch.no_grad(): __UpperCAmelCase = model(**lowercase__ ) # verify the logits __UpperCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowercase__ ) __UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
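# A hedged note on running the SwiftFormer test module above: in the upstream
# transformers layout it lives under tests/models/swiftformer/, and the @slow
# integration test only runs when RUN_SLOW is set. The exact path is an
# assumption about the repo checkout:
#
#   RUN_SLOW=1 python -m pytest tests/models/swiftformer/test_modeling_swiftformer.py -v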
333
0
import math
import sys


def lowercase_(number: int) -> int:
    """Return the minimum number of perfect squares that sum up to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
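# Illustrative checks for the dynamic programme above, using this file's own
# (obfuscated) function name; the expected values follow from Lagrange's
# four-square theorem, which bounds every answer by 4:
if __name__ == "__main__":
    assert lowercase_(9) == 1   # 9 = 3**2
    assert lowercase_(12) == 3  # 12 = 2**2 + 2**2 + 2**2
    assert lowercase_(7) == 4   # 7 = 4 + 1 + 1 + 1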
87
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A_ : str = logging.get_logger(__name__) A_ : str = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Union[str, Any] = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) A_ : Dict = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), 
] ) A_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) A_ : Tuple = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) A_ : int = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) A_ : Tuple = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) A_ : Tuple = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) A_ : int = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) A_ : Tuple = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( 
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A_ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModel) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A_ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A_ : int = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class A_ ( 
_BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A_ : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
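# Hedged usage sketch for the Flax auto classes defined above; the
# "bert-base-cased" checkpoint is only an example, and any architecture listed
# in FLAX_MODEL_MAPPING_NAMES works the same way (requires jax/flax installed):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    outputs = model(**tokenizer("Hello world", return_tensors="np"))
    print(outputs.last_hidden_state.shape)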
333
0
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--mobilebert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained MobileBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
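# Hedged example invocation of the conversion script above; the script file
# name and all three paths are placeholders, not real files:
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin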
257
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging A_ : Tuple = logging.get_logger(__name__) class A_ ( _a ): '''simple docstring''' a__ = "linear" a__ = "cosine" a__ = "cosine_with_restarts" a__ = "polynomial" a__ = "constant" a__ = "constant_with_warmup" a__ = "piecewise_constant" def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple: '''simple docstring''' return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) ) return 1.0 return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = {} __UpperCAmelCase = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: __UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' ) __UpperCAmelCase = int(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = float(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = value __UpperCAmelCase = float(rule_list[-1] ) def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): def rule_func(SCREAMING_SNAKE_CASE ) -> float: __UpperCAmelCase = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 
* (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]: '''simple docstring''' __UpperCAmelCase = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __UpperCAmelCase = lr_init - lr_end __UpperCAmelCase = num_training_steps - num_warmup_steps __UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps __UpperCAmelCase = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
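# Hedged usage sketch for the scheduler factory above. In the upstream source
# the dispatch helper defined last is named `get_scheduler` (here every def is
# obfuscated to `__a`), so the name below is an assumption; step counts are
# illustrative:
if __name__ == "__main__":
    import torch

    optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1_000)
    for _ in range(5):
        optimizer.step()
        lr_scheduler.step()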
333
0
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ ) lowerCamelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase__ ) lowerCamelCase = checkpoints.load_tax_checkpoint(lowerCamelCase__ ) lowerCamelCase = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": lowerCamelCase = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`""" """ attribute with a value from [\'local\', \'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase = f'layers_{str(lowerCamelCase__ )}' # Self-Attention lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning lowerCamelCase = flax_model.params["""encoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""] lowerCamelCase = tax_attention_key lowerCamelCase = tax_attention_out lowerCamelCase = tax_attention_query lowerCamelCase = tax_attention_value lowerCamelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase = tax_global_layer_norm if split_mlp_wi: lowerCamelCase = tax_mlp_wi_a lowerCamelCase = tax_mlp_wi_a else: lowerCamelCase = tax_mlp_wi lowerCamelCase = tax_mlp_wo lowerCamelCase = tax_mlp_layer_norm lowerCamelCase = flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T lowerCamelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == 
"transient-global": lowerCamelCase = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T lowerCamelCase = tax_encoder_global_rel_embedding # Assigning lowerCamelCase = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] lowerCamelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase = f'layers_{str(lowerCamelCase__ )}' # Self-Attention lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] lowerCamelCase = tax_enc_dec_attention_module["""key"""]["""kernel"""] lowerCamelCase = tax_enc_dec_attention_module["""out"""]["""kernel"""] lowerCamelCase = tax_enc_dec_attention_module["""query"""]["""kernel"""] lowerCamelCase = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning lowerCamelCase = flax_model.params["""decoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""] lowerCamelCase = tax_attention_key lowerCamelCase = tax_attention_out lowerCamelCase = tax_attention_query lowerCamelCase = tax_attention_value lowerCamelCase = tax_pre_attention_layer_norm lowerCamelCase = tax_enc_dec_attention_key lowerCamelCase = tax_enc_dec_attention_out lowerCamelCase = tax_enc_dec_attention_query lowerCamelCase = tax_enc_dec_attention_value lowerCamelCase = tax_cross_layer_norm if split_mlp_wi: lowerCamelCase = tax_mlp_wi_a lowerCamelCase = tax_mlp_wi_a else: lowerCamelCase = tax_mlp_wi lowerCamelCase = tax_mlp_wo lowerCamelCase = txa_mlp_layer_norm lowerCamelCase = flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] lowerCamelCase = txa_decoder_norm # Only for layer 0: lowerCamelCase = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T lowerCamelCase = tax_decoder_rel_embedding # Token Embeddings lowerCamelCase = tax_model["""target"""]["""token_embedder"""]["""embedding"""] lowerCamelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowerCamelCase = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] 
flax_model.save_pretrained(lowerCamelCase__ ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) UpperCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
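# Hedged example invocation for the T5X-to-Flax conversion script above; the
# checkpoint path and dump folder are placeholders, and the config name is
# just one LongT5 example:
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-flax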
252
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """Interpolate and evaluate a polynomial at `xa` using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
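# Worked example for the routine above: the points lie on y = x + 5, so the
# interpolated value at x = 5 is 10; the second return value is the full
# Neville table of intermediate estimates.
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10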
333
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : int = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class lowerCAmelCase ( _a ): '''simple docstring''' _A : Any = '''table-transformer''' _A : Optional[Any] = ['''past_key_values'''] _A : Optional[int] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : int , __a : List[str]=True , __a : Union[str, Any]=None , __a : Union[str, Any]=3 , __a : Union[str, Any]=100 , __a : int=6 , __a : Tuple=2048 , __a : Dict=8 , __a : Dict=6 , __a : Optional[int]=2048 , __a : List[str]=8 , __a : Optional[Any]=0.0 , __a : List[Any]=0.0 , __a : List[Any]=True , __a : Union[str, Any]="relu" , __a : Union[str, Any]=256 , __a : Dict=0.1 , __a : Optional[Any]=0.0 , __a : Union[str, Any]=0.0 , __a : str=0.02 , __a : Union[str, Any]=1.0 , __a : Dict=False , __a : Dict="sine" , __a : Any="resnet50" , __a : Dict=True , __a : List[str]=False , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : List[Any]=1 , __a : Any=1 , __a : Optional[int]=5 , __a : Optional[int]=2 , __a : Tuple=0.1 , **__a : str , ) -> Optional[Any]: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __lowercase : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): __lowercase : Optional[int] = backbone_config.get("""model_type""" ) __lowercase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] __lowercase : List[str] = config_class.from_dict(lowercase__ ) # set timm attributes to None __lowercase , __lowercase , __lowercase : Union[str, Any] = None, None, None __lowercase : List[Any] = use_timm_backbone __lowercase : Optional[int] = backbone_config __lowercase : str = num_channels __lowercase : List[Any] = num_queries __lowercase : List[str] = d_model __lowercase : int = encoder_ffn_dim __lowercase : str = encoder_layers __lowercase : str = encoder_attention_heads __lowercase : Union[str, Any] = decoder_ffn_dim __lowercase : str = decoder_layers __lowercase : Union[str, Any] = decoder_attention_heads __lowercase : str = dropout __lowercase : Union[str, Any] = attention_dropout __lowercase : List[str] = activation_dropout __lowercase : List[Any] = activation_function __lowercase : str = init_std __lowercase : Optional[int] = init_xavier_std __lowercase : Tuple = encoder_layerdrop __lowercase : int = decoder_layerdrop __lowercase : Optional[Any] = encoder_layers __lowercase : Dict = auxiliary_loss __lowercase : int = position_embedding_type __lowercase : List[Any] = backbone __lowercase : Optional[int] = use_pretrained_backbone __lowercase : List[str] = dilation # Hungarian matcher __lowercase : Union[str, Any] = class_cost __lowercase : str = bbox_cost __lowercase : int = giou_cost # Loss coefficients __lowercase : Dict = mask_loss_coefficient __lowercase : Optional[int] = dice_loss_coefficient __lowercase : Dict = bbox_loss_coefficient 
__lowercase : Any = giou_loss_coefficient __lowercase : Optional[int] = eos_coefficient super().__init__(is_encoder_decoder=lowercase__ , **lowercase__ ) @property def lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" return self.encoder_attention_heads @property def lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" return self.d_model class lowerCAmelCase ( _a ): '''simple docstring''' _A : Dict = version.parse('''1.11''' ) @property def lowerCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowerCAmelCase ( self : Any ) -> float: """simple docstring""" return 1E-5 @property def lowerCAmelCase ( self : str ) -> int: """simple docstring""" return 12
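# Hedged construction sketch; both classes above are obfuscated in this dump,
# but in upstream transformers they are TableTransformerConfig and
# TableTransformerOnnxConfig, with hidden_size/num_attention_heads resolved
# through the attribute map and the properties defined above:
if __name__ == "__main__":
    from transformers import TableTransformerConfig

    config = TableTransformerConfig(num_queries=100, d_model=256)
    print(config.hidden_size, config.num_attention_heads)  # 256 8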
233
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover greedily from a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
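# Sanity check for the approximation above, using the graph from the trailing
# comment: every edge must have at least one endpoint in the returned cover.
if __name__ == "__main__":
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(demo_graph)
    assert all(u in cover or v in cover for u, v in get_edges(demo_graph))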
333
0
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculate the new index to resume from after a failed comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
314
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Perform a topological sort of the directed acyclic graph `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
333
0
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig A_ = logging.get_logger(__name__) # General docstring A_ = 'PoolFormerConfig' # Base docstring A_ = 'sail/poolformer_s12' A_ = [1, 5_12, 7, 7] # Image classification docstring A_ = 'sail/poolformer_s12' A_ = 'tabby, tabby cat' A_ = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : List[str] = 0.0 , snake_case__ : Tuple = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input _snake_case : Optional[Any] = 1 - drop_prob _snake_case : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _snake_case : int = keep_prob + torch.rand(snake_case__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _snake_case : Optional[Any] = input.div(snake_case__ ) * random_tensor return output class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Dict, a_: List[str] = None ): '''simple docstring''' super().__init__() _snake_case : List[str] = drop_prob def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ): '''simple docstring''' return drop_path(lowercase__, self.drop_prob, self.training ) def UpperCamelCase_ ( self: int ): '''simple docstring''' return "p={}".format(self.drop_prob ) class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[str], a_: str, a_: str, a_: Any, a_: Dict, a_: List[Any], a_: Optional[Any]=None ): '''simple docstring''' super().__init__() _snake_case : Dict = patch_size if isinstance(lowercase__, collections.abc.Iterable ) else (patch_size, patch_size) _snake_case : Any = stride if isinstance(lowercase__, collections.abc.Iterable ) else (stride, stride) _snake_case : int = padding if isinstance(lowercase__, collections.abc.Iterable ) else (padding, padding) _snake_case : Dict = nn.Convad(lowercase__, lowercase__, kernel_size=lowercase__, stride=lowercase__, padding=lowercase__ ) _snake_case : int = norm_layer(lowercase__ ) if norm_layer else nn.Identity() def UpperCamelCase_ ( self: Optional[Any], a_: Any ): '''simple docstring''' _snake_case : Dict = self.projection(lowercase__ ) _snake_case : Any = self.norm(lowercase__ ) return embeddings class lowercase( nn.GroupNorm ): '''simple docstring''' def __init__( self: Dict, a_: Optional[Any], **a_: Any ): '''simple docstring''' super().__init__(1, lowercase__, **lowercase__ ) class lowercase( nn.Module ): '''simple docstring''' def __init__( self: int, a_: Dict ): '''simple docstring''' super().__init__() _snake_case : Dict = nn.AvgPoolad(lowercase__, stride=1, padding=pool_size // 2, count_include_pad=lowercase__ ) def UpperCamelCase_ ( self: int, a_: Optional[Any] ): '''simple docstring''' return self.pool(lowercase__ ) - hidden_states class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Union[str, Any], a_: Optional[Any], a_: List[str], a_: List[str], a_: List[str] ): '''simple docstring''' 
super().__init__() _snake_case : Optional[Any] = nn.Convad(lowercase__, lowercase__, 1 ) _snake_case : List[str] = nn.Convad(lowercase__, lowercase__, 1 ) _snake_case : Any = PoolFormerDropPath(lowercase__ ) if isinstance(config.hidden_act, lowercase__ ): _snake_case : int = ACTaFN[config.hidden_act] else: _snake_case : Tuple = config.hidden_act def UpperCamelCase_ ( self: List[str], a_: Tuple ): '''simple docstring''' _snake_case : Dict = self.conva(lowercase__ ) _snake_case : List[str] = self.act_fn(lowercase__ ) _snake_case : Dict = self.drop(lowercase__ ) _snake_case : List[str] = self.conva(lowercase__ ) _snake_case : Optional[int] = self.drop(lowercase__ ) return hidden_states class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Dict, a_: List[str], a_: str, a_: Tuple, a_: List[Any], a_: Any, a_: Tuple ): '''simple docstring''' super().__init__() _snake_case : str = PoolFormerPooling(lowercase__ ) _snake_case : Union[str, Any] = PoolFormerOutput(lowercase__, lowercase__, lowercase__, lowercase__ ) _snake_case : Any = PoolFormerGroupNorm(lowercase__ ) _snake_case : Tuple = PoolFormerGroupNorm(lowercase__ ) # Useful for training neural nets _snake_case : Dict = PoolFormerDropPath(lowercase__ ) if drop_path > 0.0 else nn.Identity() _snake_case : List[str] = config.use_layer_scale if config.use_layer_scale: _snake_case : Union[str, Any] = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase__) ), requires_grad=lowercase__ ) _snake_case : Dict = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase__) ), requires_grad=lowercase__ ) def UpperCamelCase_ ( self: List[Any], a_: int ): '''simple docstring''' if self.use_layer_scale: _snake_case : Dict = self.pooling(self.before_norm(lowercase__ ) ) _snake_case : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _snake_case : Union[str, Any] = hidden_states + self.drop_path(lowercase__ ) _snake_case : Dict = () _snake_case : Any = self.output(self.after_norm(lowercase__ ) ) _snake_case : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _snake_case : Union[str, Any] = hidden_states + self.drop_path(lowercase__ ) _snake_case : int = (output,) + outputs return outputs else: _snake_case : List[Any] = self.drop_path(self.pooling(self.before_norm(lowercase__ ) ) ) # First residual connection _snake_case : Any = pooling_output + hidden_states _snake_case : Dict = () # Second residual connection inside the PoolFormerOutput block _snake_case : Optional[int] = self.drop_path(self.output(self.after_norm(lowercase__ ) ) ) _snake_case : Any = hidden_states + layer_output _snake_case : int = (output,) + outputs return outputs class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[str], a_: List[Any] ): '''simple docstring''' super().__init__() _snake_case : str = config # stochastic depth decay rule _snake_case : Union[str, Any] = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths ) )] # patch embeddings _snake_case : List[str] = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) _snake_case : List[Any] = nn.ModuleList(lowercase__ ) # Transformer blocks _snake_case : Union[str, Any] = [] _snake_case : Any = 0 for i in 
range(config.num_encoder_blocks ): # each block consists of layers _snake_case : Tuple = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( lowercase__, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ), drop_path=dpr[cur + j], ) ) blocks.append(nn.ModuleList(lowercase__ ) ) _snake_case : Dict = nn.ModuleList(lowercase__ ) def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: List[Any]=False, a_: List[Any]=True ): '''simple docstring''' _snake_case : List[str] = () if output_hidden_states else None _snake_case : List[str] = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings, self.block ) ): _snake_case , _snake_case : int = layers # Get patch embeddings from hidden_states _snake_case : List[str] = embedding_layer(lowercase__ ) # Send the embeddings through the blocks for _, blk in enumerate(lowercase__ ): _snake_case : Optional[Any] = blk(lowercase__ ) _snake_case : str = layer_outputs[0] if output_hidden_states: _snake_case : str = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase__, hidden_states=lowercase__ ) class lowercase( _a ): '''simple docstring''' lowercase__ = PoolFormerConfig lowercase__ = "poolformer" lowercase__ = "pixel_values" lowercase__ = True def UpperCamelCase_ ( self: Tuple, a_: Tuple ): '''simple docstring''' if isinstance(lowercase__, (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowercase__, nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def UpperCamelCase_ ( self: Any, a_: Union[str, Any], a_: Tuple=False ): '''simple docstring''' if isinstance(lowercase__, lowercase__ ): _snake_case : Tuple = value A_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' A_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, _a , ) class lowercase( _a ): '''simple docstring''' def __init__( self: Optional[Any], a_: Optional[int] ): '''simple docstring''' super().__init__(lowercase__ ) _snake_case : Optional[Any] = config _snake_case : Tuple = PoolFormerEncoder(lowercase__ ) # Initialize weights and apply final processing self.post_init() def UpperCamelCase_ ( self: int ): '''simple docstring''' return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(lowercase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowercase__, config_class=_CONFIG_FOR_DOC, modality="""vision""", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def UpperCamelCase_ ( self: List[Any], a_: List[Any] = None, a_: Dict = None, a_: List[str] = None, ): '''simple docstring''' _snake_case : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) _snake_case : Union[str, Any] = self.encoder( lowercase__, output_hidden_states=lowercase__, return_dict=lowercase__, ) _snake_case : Union[str, Any] = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=lowercase__, hidden_states=encoder_outputs.hidden_states, ) class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Any, a_: int ): '''simple docstring''' super().__init__() _snake_case : Optional[Any] = nn.Linear(config.hidden_size, config.hidden_size ) def UpperCamelCase_ ( self: Any, a_: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = self.dense(lowercase__ ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , _a , ) class lowercase( _a ): '''simple docstring''' def __init__( self: int, a_: str ): '''simple docstring''' super().__init__(lowercase__ ) _snake_case : Optional[int] = config.num_labels _snake_case : int = PoolFormerModel(lowercase__ ) # Final norm _snake_case : Dict = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _snake_case : Any = ( nn.Linear(config.hidden_sizes[-1], config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowercase__, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def UpperCamelCase_ ( self: str, a_: int = None, a_: Union[str, Any] = None, a_: Dict = None, a_: Optional[Any] = None, ): '''simple docstring''' _snake_case : Any = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : Optional[Any] = self.poolformer( lowercase__, output_hidden_states=lowercase__, return_dict=lowercase__, ) _snake_case : Dict = outputs[0] _snake_case : Union[str, Any] = self.classifier(self.norm(lowercase__ ).mean([-2, -1] ) ) _snake_case : Union[str, Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case : Optional[int] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case : List[str] = """single_label_classification""" else: _snake_case : Optional[Any] = """multi_label_classification""" if self.config.problem_type == 
"regression": _snake_case : int = MSELoss() if self.num_labels == 1: _snake_case : Tuple = loss_fct(logits.squeeze(), labels.squeeze() ) else: _snake_case : Tuple = loss_fct(lowercase__, lowercase__ ) elif self.config.problem_type == "single_label_classification": _snake_case : List[str] = CrossEntropyLoss() _snake_case : Dict = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case : Dict = BCEWithLogitsLoss() _snake_case : Dict = loss_fct(lowercase__, lowercase__ ) if not return_dict: _snake_case : Optional[int] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase__, logits=lowercase__, hidden_states=outputs.hidden_states )
64
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
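# With the lazy module above in place, downstream imports resolve on first
# access; a hedged sketch (requires torch, and a config-only model carries
# random weights):
#
#   from transformers import GraphormerConfig, GraphormerModel
#
#   model = GraphormerModel(GraphormerConfig())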
333
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'microsoft/unispeech-large-1500h-cv': ( 'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class UpperCAmelCase_ ( _a ): UpperCamelCase ="unispeech" def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_=3_20 , UpperCamelCase_=2 , UpperCamelCase_=0.1 , UpperCamelCase_=1_00 , UpperCamelCase_=2_56 , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=80 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=0.5 , **UpperCamelCase_ , ) -> str: super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) __lowercase : str = hidden_size __lowercase : str = feat_extract_norm __lowercase : Optional[Any] = feat_extract_activation __lowercase : Union[str, Any] = list(lowercase__ ) __lowercase : Dict = list(lowercase__ ) __lowercase : Optional[Any] = list(lowercase__ ) __lowercase : Union[str, Any] = conv_bias __lowercase : Any = num_conv_pos_embeddings __lowercase : List[Any] = num_conv_pos_embedding_groups __lowercase : int = len(self.conv_dim ) __lowercase : Any = num_hidden_layers __lowercase : Optional[Any] = intermediate_size __lowercase : List[str] = hidden_act __lowercase : str = num_attention_heads __lowercase : List[Any] = hidden_dropout __lowercase : Union[str, Any] = attention_dropout __lowercase : Any = activation_dropout __lowercase : Optional[int] = feat_proj_dropout __lowercase : int = final_dropout __lowercase : Any = layerdrop __lowercase : List[str] = layer_norm_eps __lowercase : Dict = initializer_range __lowercase : int = num_ctc_classes __lowercase : Union[str, Any] = vocab_size __lowercase : Optional[Any] = do_stable_layer_norm __lowercase : List[Any] = use_weighted_layer_sum __lowercase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase : Optional[Any] = apply_spec_augment __lowercase : int = mask_time_prob __lowercase : Optional[Any] = mask_time_length __lowercase : Tuple = mask_time_min_masks __lowercase : str = mask_feature_prob __lowercase : Union[str, Any] = mask_feature_length __lowercase : Tuple = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowercase : Dict = num_codevectors_per_group __lowercase : int = num_codevector_groups __lowercase : Dict = contrastive_logits_temperature __lowercase : int = feat_quantizer_dropout __lowercase : Dict = num_negatives __lowercase : Tuple = codevector_dim __lowercase : Tuple = proj_codevector_dim __lowercase : Tuple = diversity_loss_weight # ctc loss __lowercase : Tuple = ctc_loss_reduction __lowercase : Any = ctc_zero_infinity # pretraining loss __lowercase : Union[str, Any] = replace_prob @property def _lowerCamelCase ( self ) -> int: return functools.reduce(operator.mul , self.conv_stride , 1 )
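# Quick check of the property above: the product of the conv strides is the
# waveform-to-frame downsampling factor. A standalone sketch (plain Python, no
# transformers dependency), using the default strides from the signature above.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsample = functools.reduce(operator.mul, conv_stride, 1)
assert downsample == 320  # 5 * 2**6

# So one second of 16 kHz audio yields 16000 // 320 = 50 encoder frames.
print(16000 // downsample)  # 50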
249
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: '''simple docstring''' model.train() __UpperCAmelCase = model(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' set_seed(4_2 ) __UpperCAmelCase = RegressionModel() __UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) # Make a copy of `model` if sched: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' # Test when on a single CPU or GPU that the context manager does nothing __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" 
(noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' # Test on distributed setup that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = 
input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' __UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def __a ( ) -> str: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = 
RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase = RegressionDataset(length=9_6 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if iteration < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if batch_num < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
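# The tests above distil to this idiom. A self-contained sketch of
# `Accelerator.accumulate` (runs as-is on a single CPU process; under DDP the
# gradient all-reduce is skipped on the intermediate accumulation steps, which
# is exactly what the assertions above verify).
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)  # scales the loss by 1/gradient_accumulation_steps
        optimizer.step()            # the wrapped optimizer no-ops on accumulation-only steps
        optimizer.zero_grad()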
333
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class __A( _a ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """marian""" SCREAMING_SNAKE_CASE__ = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self , SCREAMING_SNAKE_CASE_=5_81_01 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=5_81_00 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=5_81_00 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = vocab_size UpperCamelCase__ = decoder_vocab_size or vocab_size UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = d_model UpperCamelCase__ = encoder_ffn_dim UpperCamelCase__ = encoder_layers UpperCamelCase__ = encoder_attention_heads UpperCamelCase__ = decoder_ffn_dim UpperCamelCase__ = decoder_layers UpperCamelCase__ = decoder_attention_heads UpperCamelCase__ = dropout UpperCamelCase__ = attention_dropout UpperCamelCase__ = activation_dropout UpperCamelCase__ = activation_function UpperCamelCase__ = init_std UpperCamelCase__ = encoder_layerdrop UpperCamelCase__ = decoder_layerdrop UpperCamelCase__ = use_cache UpperCamelCase__ = encoder_layers UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True UpperCamelCase__ = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , ) class __A( _a ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCAmelCase_ (self ): if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: UpperCamelCase__ = {0: """batch"""} UpperCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: UpperCamelCase__ = {0: """batch""", 1: """decoder_sequence"""} UpperCamelCase__ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowercase__ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: UpperCamelCase__ , UpperCamelCase__ = self.num_layers for i in range(lowercase__ ): UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} else: UpperCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCAmelCase_ (self ): if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ = super().outputs else: UpperCamelCase__ = super(lowercase__ , self ).outputs if self.use_past: UpperCamelCase__ , UpperCamelCase__ = self.num_layers for i in range(lowercase__ ): UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Generate decoder inputs UpperCamelCase__ = seq_length if not self.use_past else 1 UpperCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) UpperCamelCase__ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase__ = dict(**lowercase__ , **lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch UpperCamelCase__ , UpperCamelCase__ = common_inputs["""input_ids"""].shape UpperCamelCase__ = common_inputs["""decoder_input_ids"""].shape[1] UpperCamelCase__ , UpperCamelCase__ = self.num_attention_heads UpperCamelCase__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase__ = decoder_seq_length + 3 UpperCamelCase__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase__ = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowercase__ , lowercase__ )] , dim=1 ) UpperCamelCase__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase__ , UpperCamelCase__ = self.num_layers UpperCamelCase__ = min(lowercase__ , lowercase__ ) UpperCamelCase__ = max(lowercase__ , lowercase__ ) - min_num_layers UpperCamelCase__ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(lowercase__ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), ) ) # TODO: test this. 
UpperCamelCase__ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(lowercase__ , lowercase__ ): common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) ) return common_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch UpperCamelCase__ , UpperCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values UpperCamelCase__ = seqlen + 2 UpperCamelCase__ , UpperCamelCase__ = self.num_layers UpperCamelCase__ , UpperCamelCase__ = self.num_attention_heads UpperCamelCase__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase__ = common_inputs["""attention_mask"""].dtype UpperCamelCase__ = torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 ) UpperCamelCase__ = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ ) ] return common_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCamelCase__ = compute_effective_axis_dimension( lowercase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase__ = tokenizer.num_special_tokens_to_add(lowercase__ ) UpperCamelCase__ = compute_effective_axis_dimension( lowercase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase__ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase__ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase__ = dict(tokenizer(lowercase__ , return_tensors=lowercase__ ) ) return common_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ): if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ ) else: UpperCamelCase__ = self._generate_dummy_inputs_for_causal_lm( lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ ) return common_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ = super()._flatten_past_key_values_(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: UpperCamelCase__ = super(lowercase__ , self )._flatten_past_key_values_( 
lowercase__ , lowercase__ , lowercase__ , lowercase__ ) @property def UpperCAmelCase_ (self ): return 1E-4
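# Shape arithmetic behind the dummy past_key_values built above: each decoder
# layer caches (self_k, self_v, cross_k, cross_v); the self-attention caches
# grow with the decoder sequence, the cross-attention caches with the
# encoder's. The numbers are illustrative, not read from a real Marian
# checkpoint.
batch, num_heads, d_model = 2, 8, 512
encoder_seq, decoder_seq = 16, 7

self_attn_shape = (batch, num_heads, decoder_seq, d_model // num_heads)
cross_attn_shape = (batch, num_heads, encoder_seq, d_model // num_heads)
print(self_attn_shape)   # (2, 8, 7, 64)
print(cross_attn_shape)  # (2, 8, 16, 64)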
244
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
333
0
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Any: '''simple docstring''' lowerCAmelCase : Any = None if token is not None: lowerCAmelCase : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"} lowerCAmelCase : str = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" lowerCAmelCase : int = requests.get(_UpperCAmelCase, headers=_UpperCAmelCase ).json() lowerCAmelCase : Union[str, Any] = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) lowerCAmelCase : Optional[int] = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_UpperCAmelCase ): lowerCAmelCase : List[str] = requests.get(url + f"&page={i + 2}", headers=_UpperCAmelCase ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Dict: '''simple docstring''' lowerCAmelCase : Optional[Any] = None if token is not None: lowerCAmelCase : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"} lowerCAmelCase : Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" lowerCAmelCase : Optional[int] = requests.get(_UpperCAmelCase, headers=_UpperCAmelCase ).json() lowerCAmelCase : Optional[Any] = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) lowerCAmelCase : int = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_UpperCAmelCase ): lowerCAmelCase : int = requests.get(url + f"&page={i + 2}", headers=_UpperCAmelCase ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[Any]: '''simple docstring''' lowerCAmelCase : Union[str, Any] = None if token is not None: lowerCAmelCase : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"} lowerCAmelCase : Any = requests.get(_UpperCAmelCase, headers=_UpperCAmelCase, allow_redirects=_UpperCAmelCase ) lowerCAmelCase : List[Any] = result.headers['Location'] lowerCAmelCase : List[Any] = requests.get(_UpperCAmelCase, allow_redirects=_UpperCAmelCase ) lowerCAmelCase : Any = os.path.join(_UpperCAmelCase, f"{artifact_name}.zip" ) with open(_UpperCAmelCase, 'wb' ) as fp: fp.write(response.content ) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' lowerCAmelCase : List[Any] = [] lowerCAmelCase : Any = [] lowerCAmelCase : Optional[int] = None with zipfile.ZipFile(_UpperCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_UpperCAmelCase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(_UpperCAmelCase ) as f: for line in f: lowerCAmelCase : str = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs lowerCAmelCase : str 
= line[: line.index(': ' )] lowerCAmelCase : List[Any] = line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed lowerCAmelCase : Any = line[len('FAILED ' ) :] failed_tests.append(_UpperCAmelCase ) elif filename == "job_name.txt": lowerCAmelCase : List[Any] = line if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError( f"`errors` and `failed_tests` should have the same number of elements. Got {len(_UpperCAmelCase )} for `errors` " f"and {len(_UpperCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" ' problem.' ) lowerCAmelCase : Optional[int] = None if job_name and job_links: lowerCAmelCase : Tuple = job_links.get(_UpperCAmelCase, _UpperCAmelCase ) # A list with elements of the form (line of error, error, failed test) lowerCAmelCase : Any = [x + [y] + [job_link] for x, y in zip(_UpperCAmelCase, _UpperCAmelCase )] return result def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Dict: '''simple docstring''' lowerCAmelCase : Tuple = [] lowerCAmelCase : int = [os.path.join(_UpperCAmelCase, _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(_UpperCAmelCase, job_links=_UpperCAmelCase ) ) return errors def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' lowerCAmelCase : Tuple = Counter() counter.update([x[1] for x in logs] ) lowerCAmelCase : Dict = counter.most_common() lowerCAmelCase : Any = {} for error, count in counts: if error_filter is None or error not in error_filter: lowerCAmelCase : List[Any] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} lowerCAmelCase : Tuple = dict(sorted(r.items(), key=lambda _UpperCAmelCase : item[1]["count"], reverse=_UpperCAmelCase ) ) return r def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' lowerCAmelCase : Optional[Any] = test.split('::' )[0] if test.startswith('tests/models/' ): lowerCAmelCase : Dict = test.split('/' )[2] else: lowerCAmelCase : Optional[int] = None return test def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> List[str]: '''simple docstring''' lowerCAmelCase : Dict = [(x[0], x[1], get_model(x[2] )) for x in logs] lowerCAmelCase : int = [x for x in logs if x[2] is not None] lowerCAmelCase : List[str] = {x[2] for x in logs} lowerCAmelCase : Optional[int] = {} for test in tests: lowerCAmelCase : Tuple = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) lowerCAmelCase : str = counter.most_common() lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} lowerCAmelCase : List[str] = sum(error_counts.values() ) if n_errors > 0: lowerCAmelCase : Optional[int] = {'count': n_errors, 'errors': error_counts} lowerCAmelCase : Tuple = dict(sorted(r.items(), key=lambda _UpperCAmelCase : item[1]["count"], reverse=_UpperCAmelCase ) ) return r def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase : int = '| no. 
| error | status |' lowerCAmelCase : List[Any] = '|-:|:-|:-|' lowerCAmelCase : Optional[Any] = [header, sep] for error in reduced_by_error: lowerCAmelCase : str = reduced_by_error[error]['count'] lowerCAmelCase : Any = f"| {count} | {error[:100]} | |" lines.append(_UpperCAmelCase ) return "\n".join(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]: '''simple docstring''' lowerCAmelCase : Optional[int] = '| model | no. of errors | major error | count |' lowerCAmelCase : Tuple = '|-:|-:|-:|-:|' lowerCAmelCase : Dict = [header, sep] for model in reduced_by_model: lowerCAmelCase : Optional[int] = reduced_by_model[model]['count'] lowerCAmelCase , lowerCAmelCase : Union[str, Any] = list(reduced_by_model[model]['errors'].items() )[0] lowerCAmelCase : Optional[Any] = f"| {model} | {count} | {error[:60]} | {_count} |" lines.append(_UpperCAmelCase ) return "\n".join(_UpperCAmelCase ) if __name__ == "__main__": __A : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') __A : str = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token) __A : int = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __A : Dict = k.find(''' / ''') __A : Any = k[index + len(''' / ''') :] __A : Dict = v with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A : int = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A : str = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A : List[Any] = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A : List[Any] = reduce_by_error(errors) __A : Tuple = reduce_by_model(errors) __A : int = make_github_table(reduced_by_error) __A : Union[str, Any] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa) with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa)
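# The reduce step above is ordinary Counter bookkeeping. A toy illustration
# with made-up (error_line, error, test) tuples -- not real CI output.
from collections import Counter

logs = [
    ("line 1", "OSError: CUDA out of memory", "tests/models/bert/test_modeling_bert.py::test_fwd"),
    ("line 2", "OSError: CUDA out of memory", "tests/models/gpt2/test_modeling_gpt2.py::test_fwd"),
    ("line 3", "AssertionError: tensors differ", "tests/models/bert/test_modeling_bert.py::test_bwd"),
]

counter = Counter(error for _, error, _ in logs)
for error, count in counter.most_common():
    print(f"{count:>3}  {error}")
# prints "  2  OSError: CUDA out of memory" then "  1  AssertionError: tensors differ"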
138
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
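# Round-trip usage example for the rail-fence functions above (assumes
# encrypt/decrypt from this file are in scope).
message = "WE ARE DISCOVERED FLEE AT ONCE"
ciphertext = encrypt(message, 3)
assert decrypt(ciphertext, 3) == message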
333
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig UpperCAmelCase : Optional[int] = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class lowerCamelCase__ ( _a ): """simple docstring""" __a = """tapas""" def __init__( self : Tuple , UpperCamelCase : List[str]=30_522 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : Optional[Any]=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : Any=3_072 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : List[str]=1_024 , UpperCamelCase : List[Any]=[3, 256, 256, 2, 256, 256, 10] , UpperCamelCase : str=0.02 , UpperCamelCase : List[str]=1e-1_2 , UpperCamelCase : List[Any]=0 , UpperCamelCase : List[Any]=10.0 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Union[str, Any]=1.0 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : int=1.0 , UpperCamelCase : int=False , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=1.0 , UpperCamelCase : Union[str, Any]=1.0 , UpperCamelCase : Any=False , UpperCamelCase : str=False , UpperCamelCase : Optional[int]="ratio" , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=64 , UpperCamelCase : Tuple=32 , UpperCamelCase : Tuple=False , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Dict=False , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Any=True , UpperCamelCase : Dict=False , UpperCamelCase : str=None , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=lowercase__ , **lowercase__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Dict = intermediate_size __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : str = max_position_embeddings __UpperCAmelCase : Any = type_vocab_sizes __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : List[Any] = layer_norm_eps # Fine-tuning task hyperparameters __UpperCAmelCase : int = positive_label_weight __UpperCAmelCase : Optional[Any] = num_aggregation_labels __UpperCAmelCase : int = aggregation_loss_weight __UpperCAmelCase : Optional[int] = use_answer_as_supervision __UpperCAmelCase : int = answer_loss_importance __UpperCAmelCase : List[Any] = use_normalized_answer_loss __UpperCAmelCase : Optional[int] = huber_loss_delta __UpperCAmelCase : Optional[Any] = temperature __UpperCAmelCase : Tuple = aggregation_temperature __UpperCAmelCase : Union[str, Any] = use_gumbel_for_cells __UpperCAmelCase : Dict = use_gumbel_for_aggregation __UpperCAmelCase : Union[str, Any] = average_approximation_function __UpperCAmelCase : Dict = cell_selection_preference __UpperCAmelCase : Optional[int] = answer_loss_cutoff 
__UpperCAmelCase : Tuple = max_num_rows __UpperCAmelCase : str = max_num_columns __UpperCAmelCase : Optional[int] = average_logits_per_cell __UpperCAmelCase : List[str] = select_one_column __UpperCAmelCase : Dict = allow_empty_column_selection __UpperCAmelCase : List[Any] = init_cell_selection_weights_to_zero __UpperCAmelCase : Union[str, Any] = reset_position_index_per_cell __UpperCAmelCase : List[str] = disable_per_token_loss # Aggregation hyperparameters __UpperCAmelCase : List[str] = aggregation_labels __UpperCAmelCase : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , lowercase__ ): __UpperCAmelCase : Dict = {int(lowercase__ ): v for k, v in aggregation_labels.items()}
115
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A_ ( _a , _a , _a , unittest.TestCase ): '''simple docstring''' a__ = StableUnCLIPPipeline a__ = TEXT_TO_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_BATCH_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ = False def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': 
prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]: if str(lowercase__ ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(lowercase__ ) else: __UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ ) def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowercase__ ) @slow @require_torch_gpu class A_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ (self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=lowercase__ , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
333
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = 'mctct'

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act='relu',
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction='sum',
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
112
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ : int = logging.get_logger(__name__) A_ : str = {'tokenizer_file': 'tokenizer.json'} A_ : List[str] = { 'tokenizer_file': { 'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json', 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json', }, } class A_ ( _a ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ["input_ids", "attention_mask"] a__ = None def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Dict: super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: __UpperCAmelCase = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) __UpperCAmelCase = add_prefix_space __UpperCAmelCase = pre_tok_class(**lowercase__ ) __UpperCAmelCase = add_prefix_space def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding: __UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._batch_encode_plus(*lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding: __UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._encode_plus(*lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]: __UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def lowerCAmelCase_ (self , lowercase__ ) -> List[int]: __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] return input_ids
333
0
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
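# Hand-checkable sanity test for the pieces above (assumes sigmoid_function
# from this file is in scope).
import numpy as np

assert np.isclose(sigmoid_function(0.0), 0.5)  # sigma(0) = 1/2

xs = np.array([[1.0, 2.0]])            # one sample, two features
ys = np.array([1.0])
w = np.zeros(2)
h = sigmoid_function(np.dot(xs, w))    # [0.5]
grad = np.dot(xs.T, h - ys) / ys.size  # [1*(-0.5), 2*(-0.5)]
assert np.allclose(grad, [-0.5, -1.0])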
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`
    (by Lagrange's four-square theorem the answer is at most 4)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
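Quick sanity checks for the dynamic program above (12 = 4 + 4 + 4 needs 3 squares; 13 = 9 + 4 needs 2):

assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2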
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder lowerCAmelCase__ : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase__ : Any =256 class UpperCAmelCase_ ( _a ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = ['''melgan'''] def __init__( self , _A , _A , _A , _A , _A , ): '''simple docstring''' super().__init__() # From MELGAN __SCREAMING_SNAKE_CASE = math.log(1e-5 ) # Matches MelGAN training. __SCREAMING_SNAKE_CASE = 4.0 # Largest value for most examples __SCREAMING_SNAKE_CASE = 128 self.register_modules( notes_encoder=lowercase__ , continuous_encoder=lowercase__ , decoder=lowercase__ , scheduler=lowercase__ , melgan=lowercase__ , ) def _A ( self , _A , _A=(-1.0, 1.0) , _A=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output_range if clip: __SCREAMING_SNAKE_CASE = torch.clip(lowercase__ , self.min_value , self.max_value ) # Scale to [0, 1]. __SCREAMING_SNAKE_CASE = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _A ( self , _A , _A=(-1.0, 1.0) , _A=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input_range __SCREAMING_SNAKE_CASE = torch.clip(lowercase__ , lowercase__ , lowercase__ ) if clip else outputs # Scale to [0, 1]. __SCREAMING_SNAKE_CASE = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def _A ( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = input_tokens > 0 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.notes_encoder( encoder_input_tokens=lowercase__ , encoder_inputs_mask=lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.continuous_encoder( encoder_inputs=lowercase__ , encoder_inputs_mask=lowercase__ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _A ( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = noise_time if not torch.is_tensor(lowercase__ ): __SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase__ ) and len(timesteps.shape ) == 0: __SCREAMING_SNAKE_CASE = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __SCREAMING_SNAKE_CASE = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) __SCREAMING_SNAKE_CASE = self.decoder( encodings_and_masks=lowercase__ , decoder_input_tokens=lowercase__ , decoder_noise_time=lowercase__ ) return logits @torch.no_grad() def __call__( self , _A , _A = None , _A = 100 , _A = True , _A = "numpy" , _A = None , _A = 1 , ): '''simple docstring''' if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase__ , lowercase__ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(lowercase__ )}.""" ) __SCREAMING_SNAKE_CASE = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) __SCREAMING_SNAKE_CASE = np.zeros([1, 0, self.n_dims] , np.floataa ) __SCREAMING_SNAKE_CASE = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase__ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase__ ): if i == 0: __SCREAMING_SNAKE_CASE = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. __SCREAMING_SNAKE_CASE = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase__ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
__SCREAMING_SNAKE_CASE = ones __SCREAMING_SNAKE_CASE = self.scale_features( lowercase__ , output_range=[-1.0, 1.0] , clip=lowercase__ ) __SCREAMING_SNAKE_CASE = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase__ , continuous_mask=lowercase__ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop __SCREAMING_SNAKE_CASE = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase__ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase__ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __SCREAMING_SNAKE_CASE = self.decode( encodings_and_masks=lowercase__ , input_tokens=lowercase__ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 __SCREAMING_SNAKE_CASE = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ ).prev_sample __SCREAMING_SNAKE_CASE = self.scale_to_features(lowercase__ , input_range=[-1.0, 1.0] ) __SCREAMING_SNAKE_CASE = mel[:1] __SCREAMING_SNAKE_CASE = mel.cpu().float().numpy() __SCREAMING_SNAKE_CASE = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase__ , lowercase__ ) logger.info('Generated segment' , lowercase__ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( 'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( 'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' ) if output_type == "numpy": __SCREAMING_SNAKE_CASE = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: __SCREAMING_SNAKE_CASE = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase__ )
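A minimal, self-contained sketch of the [-1, 1] feature-scaling round-trip that scale_features and scale_to_features implement above, assuming min_value = log(1e-5) and max_value = 4.0 as set in __init__:

import math
import torch

lo, hi = math.log(1e-5), 4.0
features = torch.tensor([[lo, 0.0, hi]])
zero_one = (features - lo) / (hi - lo)            # scale_features: map to [0, 1]
scaled = zero_one * 2.0 - 1.0                     # then to output_range [-1, 1]
restored = (scaled + 1.0) / 2.0 * (hi - lo) + lo  # scale_to_features inverts it
assert torch.allclose(features, restored)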
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL A_ : Tuple = logging.get_logger(__name__) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' __UpperCAmelCase = b.T __UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=1 ) __UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=0 ) __UpperCAmelCase = np.matmul(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __UpperCAmelCase = aa[:, None] - 2 * ab + ba[None, :] return d def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __UpperCAmelCase = x.reshape(-1 , 3 ) __UpperCAmelCase = squared_euclidean_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return np.argmin(SCREAMING_SNAKE_CASE , axis=1 ) class A_ ( _a ): '''simple docstring''' a__ = ["pixel_values"] def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None: super().__init__(**lowercase__ ) __UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256} __UpperCAmelCase = get_size_dict(lowercase__ ) __UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None __UpperCAmelCase = do_resize __UpperCAmelCase = size __UpperCAmelCase = resample __UpperCAmelCase = do_normalize __UpperCAmelCase = do_color_quantize def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray: __UpperCAmelCase = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' ) return resize( lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray: __UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ ) __UpperCAmelCase = image - 1 return image def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image: __UpperCAmelCase = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase = size if size is not None else self.size __UpperCAmelCase = get_size_dict(lowercase__ ) __UpperCAmelCase = resample if resample is not None else self.resample __UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize __UpperCAmelCase = clusters if clusters is not None else self.clusters __UpperCAmelCase = np.array(lowercase__ ) __UpperCAmelCase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_color_quantize and clusters is None: raise ValueError('''Clusters must be specified if do_color_quantize is True.''' ) # All transformations expect numpy arrays. __UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images] if do_resize: __UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images] if do_normalize: __UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images] if do_color_quantize: __UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) __UpperCAmelCase = np.array(lowercase__ ) __UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) __UpperCAmelCase = images.shape[0] __UpperCAmelCase = images.reshape(lowercase__ , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. __UpperCAmelCase = list(lowercase__ ) else: __UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] __UpperCAmelCase = {'''input_ids''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
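A toy sketch of the nearest-cluster lookup the two helpers above implement, written inline with made-up pixel values so it does not depend on the obfuscated helper names:

import numpy as np

clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=float)  # 2 color clusters
pixels = np.array([[10, 5, 0], [250, 240, 255]], dtype=float)   # 2 flattened pixels
d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
print(np.argmin(d, axis=1))  # [0 1]: dark pixel -> cluster 0, bright pixel -> cluster 1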
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Tuple = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } UpperCAmelCase : Union[str, Any] = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } UpperCAmelCase : int = {'facebook/blenderbot_small-90M': 5_12} def __lowerCamelCase ( lowerCamelCase__ : str ): '''simple docstring''' lowerCamelCase = set() lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase = char lowerCamelCase = set(lowerCamelCase__ ) return pairs class __lowercase ( _a ): """simple docstring""" UpperCamelCase : str = VOCAB_FILES_NAMES UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : List[str] = ["input_ids", "attention_mask"] def __init__( self , A , A , A="__start__" , A="__end__" , A="__unk__" , A="__null__" , **A , ) -> int: '''simple docstring''' super().__init__(unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , **lowercase__ ) with open(lowercase__ , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase = json.load(lowercase__ ) lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(lowercase__ , encoding="""utf-8""" ) as merges_handle: lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1] lowerCamelCase = [tuple(merge.split() ) for merge in merges] lowerCamelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) lowerCamelCase = {} @property def __A ( self ) -> int: '''simple docstring''' return len(self.encoder ) def __A ( self ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self , A ) -> str: '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase = re.sub("""([.,!?()])""" , r""" \1""" , lowercase__ ) lowerCamelCase = re.sub("""(\')""" , r""" \1 """ , lowercase__ ) lowerCamelCase = re.sub(r"""\s{2,}""" , """ """ , lowercase__ ) if "\n" in token: lowerCamelCase = token.replace("""\n""" , """ __newln__""" ) lowerCamelCase = token.split(""" """ ) lowerCamelCase = [] for token in tokens: if not len(lowercase__ ): continue lowerCamelCase = token.lower() lowerCamelCase = tuple(lowercase__ ) lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCamelCase = get_pairs(lowercase__ ) if not pairs: words.append(lowercase__ ) continue while True: lowerCamelCase = min(lowercase__ , key=lambda A : self.bpe_ranks.get(lowercase__ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase , lowerCamelCase = bigram lowerCamelCase = [] lowerCamelCase = 0 while i < len(lowercase__ ): try: lowerCamelCase = word.index(lowercase__ , lowercase__ ) new_word.extend(word[i:j] ) lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 
else: new_word.append(word[i] ) i += 1 lowerCamelCase = tuple(lowercase__ ) lowerCamelCase = new_word if len(lowercase__ ) == 1: break else: lowerCamelCase = get_pairs(lowercase__ ) lowerCamelCase = """@@ """.join(lowercase__ ) lowerCamelCase = word[:-4] lowerCamelCase = word words.append(lowercase__ ) return " ".join(lowercase__ ) def __A ( self , A ) -> List[str]: '''simple docstring''' lowerCamelCase = [] lowerCamelCase = re.findall(r"""\S+\n?""" , lowercase__ ) for token in words: split_tokens.extend(list(self.bpe(lowercase__ ).split(""" """ ) ) ) return split_tokens def __A ( self , A ) -> int: '''simple docstring''' lowerCamelCase = token.lower() return self.encoder.get(lowercase__ , self.encoder.get(self.unk_token ) ) def __A ( self , A ) -> str: '''simple docstring''' return self.decoder.get(lowercase__ , self.unk_token ) def __A ( self , A ) -> str: '''simple docstring''' lowerCamelCase = """ """.join(lowercase__ ).replace("""@@ """ , """""" ).strip() return out_string def __A ( self , A , A = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__ ) + """\n""" ) lowerCamelCase = 0 with open(lowercase__ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase = token_index writer.write(""" """.join(lowercase__ ) + """\n""" ) index += 1 return vocab_file, merge_file
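A toy sketch of the symbol-pair extraction that feeds the BPE merge loop above (the `</w>` end-of-word marker follows the convention used in this tokenizer):

word = ("l", "o", "w", "</w>")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
print(sorted(pairs))  # [('l', 'o'), ('o', 'w'), ('w', '</w>')]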
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
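A generic sketch of the lazy-import pattern that `_LazyModule` implements; this is a simplified stand-in for illustration, not transformers' internal class:

import importlib


class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:  # the real import happens on first attribute access
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


np = LazyModule("numpy")
print(np.pi)  # numpy is imported only here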
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
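A toy run of the key rewrite above on a fake state dict; the keys are made up for illustration:

old_sd = {
    "roberta.embeddings.word_embeddings.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # unused upstream
    "lm_head.bias": 3,
}
new_sd = {}
for key, value in old_sd.items():
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # dropped, exactly as in the conversion loop
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    new_sd[key] = value
print(sorted(new_sd))  # ['lm_head.bias', 'roberta_prelayernorm.embeddings.word_embeddings.weight']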
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
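Usage example for the power-triangle helpers above, a 100 VA load at power factor 0.9:

print(real_power(100, 0.9))      # 90.0 (watts)
print(reactive_power(100, 0.9))  # ~43.59 (volt-amperes reactive)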
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor _SCREAMING_SNAKE_CASE : Optional[Any] = random.Random() def UpperCAmelCase_ ( _A , _A=1.0 , _A=None , _A=None ): '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE__ = global_rng SCREAMING_SNAKE_CASE__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : int=7 , __lowerCamelCase : List[Any]=400 , __lowerCamelCase : Any=2000 , __lowerCamelCase : Union[str, Any]=24 , __lowerCamelCase : List[Any]=24 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : int=1_6000 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=True , ) -> int: SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = min_seq_length SCREAMING_SNAKE_CASE__ = max_seq_length SCREAMING_SNAKE_CASE__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ = feature_size SCREAMING_SNAKE_CASE__ = num_mel_bins SCREAMING_SNAKE_CASE__ = padding_value SCREAMING_SNAKE_CASE__ = sampling_rate SCREAMING_SNAKE_CASE__ = return_attention_mask SCREAMING_SNAKE_CASE__ = do_normalize def lowercase_ ( self : Any ) -> List[Any]: return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[str]=False , __lowerCamelCase : int=False ) -> int: def _flatten(__lowerCamelCase : List[Any] ): return list(itertools.chain(*lowercase__ ) ) if equal_length: SCREAMING_SNAKE_CASE__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ = [np.asarray(lowercase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase__ ( _a , unittest.TestCase ): """simple docstring""" a = SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase_ ( self : Union[str, Any] ) -> Any: SCREAMING_SNAKE_CASE__ = SpeechaTextFeatureExtractionTester(self ) def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> List[str]: self.assertTrue(np.all(np.mean(lowercase__ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowercase__ , axis=0 ) - 1 ) < 1e-3 ) ) def lowercase_ ( self : str ) -> Any: # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = [np.asarray(lowercase__ ) for speech_input in speech_inputs] # Test 
feature size SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , padding=lowercase__ , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input SCREAMING_SNAKE_CASE__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features SCREAMING_SNAKE_CASE__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_features SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ): self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__ = np.asarray(lowercase__ ) SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_features SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ): self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1e-3 ) ) def lowercase_ ( self : Any ) -> Tuple: SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = ['''longest''', '''max_length''', '''do_not_pad'''] SCREAMING_SNAKE_CASE__ = [None, 16, None] for max_length, padding in zip(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ = feature_extractor( lowercase__ , padding=lowercase__ , max_length=lowercase__ , return_attention_mask=lowercase__ ) SCREAMING_SNAKE_CASE__ = inputs.input_features SCREAMING_SNAKE_CASE__ = inputs.attention_mask SCREAMING_SNAKE_CASE__ = [np.sum(lowercase__ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase_ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = ['''longest''', '''max_length''', '''do_not_pad'''] SCREAMING_SNAKE_CASE__ = [None, 16, None] for max_length, padding in zip(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE__ = feature_extractor( lowercase__ , max_length=lowercase__ , padding=lowercase__ , return_tensors='''np''' , return_attention_mask=lowercase__ ) SCREAMING_SNAKE_CASE__ = inputs.input_features SCREAMING_SNAKE_CASE__ = inputs.attention_mask SCREAMING_SNAKE_CASE__ = [np.sum(lowercase__ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase_ ( self : Optional[Any] ) -> Dict: 
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = feature_extractor( lowercase__ , padding='''max_length''' , max_length=4 , truncation=lowercase__ , return_tensors='''np''' , return_attention_mask=lowercase__ , ) SCREAMING_SNAKE_CASE__ = inputs.input_features SCREAMING_SNAKE_CASE__ = inputs.attention_mask SCREAMING_SNAKE_CASE__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase_ ( self : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = feature_extractor( lowercase__ , padding='''longest''' , max_length=4 , truncation=lowercase__ , return_tensors='''np''' , return_attention_mask=lowercase__ , ) SCREAMING_SNAKE_CASE__ = inputs.input_features SCREAMING_SNAKE_CASE__ = inputs.attention_mask SCREAMING_SNAKE_CASE__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__ = feature_extractor( lowercase__ , padding='''longest''' , max_length=16 , truncation=lowercase__ , return_tensors='''np''' , return_attention_mask=lowercase__ , ) SCREAMING_SNAKE_CASE__ = inputs.input_features SCREAMING_SNAKE_CASE__ = inputs.attention_mask SCREAMING_SNAKE_CASE__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowercase_ ( self : Union[str, Any] ) -> Optional[int]: import torch SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = np.random.rand(100 , 32 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) SCREAMING_SNAKE_CASE__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase_ ( self : int , __lowerCamelCase : Union[str, Any] ) -> List[Any]: from datasets import load_dataset SCREAMING_SNAKE_CASE__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ = ds.sort('''id''' ).select(range(lowercase__ ) )[:num_samples]['''audio'''] return [x["array"] for x in 
speech_samples] def lowercase_ ( self : Union[str, Any] ) -> Tuple: # fmt: off SCREAMING_SNAKE_CASE__ = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on SCREAMING_SNAKE_CASE__ = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ = feature_extractor(lowercase__ , return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase__ , atol=1e-4 ) )
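A numpy-only sketch of the per-dimension normalization that `_check_zero_mean_unit_variance` verifies above; this mirrors the property being tested, not the extractor's exact implementation:

import numpy as np

feats = np.random.rand(100, 24)
normed = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-10)
assert np.abs(normed.mean(axis=0)).max() < 1e-3
assert np.abs(normed.var(axis=0) - 1).max() < 1e-3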
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
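Quick usage example on the first small test grid above (it contains 8 negative numbers):

small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(small) == count_negatives_brute_force(small) == 8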
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : List[str] = a.name _snake_case : str = b.name _snake_case : Optional[int] = """""" _snake_case : List[Any] = """""" _snake_case : List[str] = a == b _snake_case : Optional[Any] = name_a _snake_case : Tuple = name_b return res def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(snake_case__ , snake_case__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ ) _graph_replace_input_with(node_proto.attribute[1].g , snake_case__ , snake_case__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : str = list(model.graph.initializer ) _snake_case : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _snake_case : str = inits[i].name _snake_case : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : str = os.path.dirname(snake_case__ ) _snake_case : Union[str, Any] = os.path.basename(snake_case__ ) _snake_case : Dict = onnx.load(os.path.join(snake_case__ , snake_case__ ) ) _snake_case : str = list(model.graph.initializer ) _snake_case : Tuple = set() _snake_case : int = {} _snake_case : Optional[Any] = [] _snake_case : str = 0 for i in range(len(snake_case__ ) ): if i in dup_set: continue for j in range(i + 1 , len(snake_case__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(snake_case__ ) dup_set.add(snake_case__ ) _snake_case : Optional[Any] = inits[j].data_type _snake_case : Any = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , snake_case__ ) total_reduced_size += mem_size _snake_case : Dict = inits[i].name _snake_case : List[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(snake_case__ ) else: _snake_case : Dict = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 10_24 / 10_24 / 10_24 , """GB""" ) _snake_case : List[Any] = sorted(snake_case__ ) _remove_dup_initializers_from_model(snake_case__ , snake_case__ , snake_case__ ) _snake_case : Any = """optimized_""" + model_file_name _snake_case : Optional[int] = os.path.join(snake_case__ , snake_case__ ) onnx.save(snake_case__ , snake_case__ ) return new_model
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 A_ : List[str] = sys.version_info >= (3, 10) def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str: '''simple docstring''' return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = 42 a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field(default="toto" , metadata={"help": "help message"} ) @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = BasicEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) @dataclass class A_ : '''simple docstring''' a__ = list_field(default=[] ) a__ = list_field(default=[1, 2, 3] ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) a__ = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A_ : '''simple docstring''' a__ = field() a__ = field() a__ = field() def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field() a__ = None a__ = field(default="toto" , metadata={"help": "help message"} ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) class A_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , lowercase__ ) and yy.get('''choices''' , lowercase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](lowercase__ ) , yy['''type'''](lowercase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) 
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ ) self.assertFalse(example.flag ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) __UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) 
self.assertEqual(args.foo , 42 ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase_ (self ) -> str: @dataclass class A_ : '''simple docstring''' a__ = "toto" __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual( lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) __UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase_ (self ) -> List[str]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' ) expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ ) __UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) ) __UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) 
self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } __UpperCAmelCase = parser.parse_dict(lowercase__ )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ ) def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> List[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.assertIsNotNone(lowercase__ )
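A minimal end-to-end sketch of the parser the tests above exercise; the dataclass and its fields are made up:

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    lr: float = field(default=1e-3, metadata={"help": "learning rate"})
    epochs: int = 3


parser = HfArgumentParser(TrainArgs)
(args,) = parser.parse_args_into_dataclasses(["--lr", "0.01", "--epochs", "5"])
print(args.lr, args.epochs)  # 0.01 5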
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Dict = [[] for _ in range(__UpperCamelCase )] __lowercase : Optional[Any] = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1 or len(__UpperCamelCase ) <= key: return input_string for position, character in enumerate(__UpperCamelCase ): __lowercase : Optional[Any] = position % (lowest * 2) # puts it in bounds __lowercase : Dict = min(__UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(__UpperCamelCase ) __lowercase : Dict = [''''''.join(__UpperCamelCase ) for row in temp_grid] __lowercase : int = ''''''.join(__UpperCamelCase ) return output_string def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : int = [] __lowercase : Dict = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1: return input_string __lowercase : Tuple = [[] for _ in range(__UpperCamelCase )] # generates template for position in range(len(__UpperCamelCase ) ): __lowercase : Any = position % (lowest * 2) # puts it in bounds __lowercase : List[str] = min(__UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('''*''' ) __lowercase : Dict = 0 for row in temp_grid: # fills in the characters __lowercase : Tuple = input_string[counter : counter + len(__UpperCamelCase )] grid.append(list(__UpperCamelCase ) ) counter += len(__UpperCamelCase ) __lowercase : str = '''''' # reads as zigzag for position in range(len(__UpperCamelCase ) ): __lowercase : Any = position % (lowest * 2) # puts it in bounds __lowercase : Optional[Any] = min(__UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Any = {} for key_guess in range(1 , len(__UpperCamelCase ) ): # tries every key __lowercase : Tuple = decrypt(__UpperCamelCase , __UpperCamelCase ) return results if __name__ == "__main__": import doctest doctest.testmod()
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Stores two signals and performs their circular convolution via the matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
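A hedged cross-check of the matrix method above using the FFT identity circular_conv(x, h) = IFFT(FFT(x) * FFT(h)); both lines should print [10. 10. 6. 14.] for the signals hard-coded in __init__:

import numpy as np

x, h = [2, 1, 2, -1], [1, 2, 3, 4]
via_fft = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
print(np.round(via_fft, 2))
print(CircularConvolution().circular_convolution())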
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __A( _a ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (KDPMaDiscreteScheduler,) SCREAMING_SNAKE_CASE__ = 10 def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = { """num_train_timesteps""": 11_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**lowercase__ ) return config def UpperCAmelCase_ (self ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowercase__ ) def UpperCAmelCase_ (self ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowercase__ , beta_end=lowercase__ ) def UpperCAmelCase_ (self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowercase__ ) def UpperCAmelCase_ (self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase__ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase__ = scheduler_class(**lowercase__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ = sample.to(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ = scheduler.scale_model_input(lowercase__ , lowercase__ ) UpperCamelCase__ = model(lowercase__ , lowercase__ ) UpperCamelCase__ = scheduler.step(lowercase__ , lowercase__ , lowercase__ ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(lowercase__ ) ) UpperCamelCase__ = torch.mean(torch.abs(lowercase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2 assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0002 ) < 1E-3 def UpperCAmelCase_ (self ): if torch_device == "mps": return UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**lowercase__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ = sample.to(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ = scheduler.scale_model_input(lowercase__ , lowercase__ ) UpperCamelCase__ = model(lowercase__ , lowercase__ ) UpperCamelCase__ = scheduler.step(lowercase__ , lowercase__ , lowercase__ ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(lowercase__ ) ) UpperCamelCase__ = torch.mean(torch.abs(lowercase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 def UpperCAmelCase_ (self ): if torch_device == "mps": return UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**lowercase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowercase__ ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = 
self.dummy_sample_deter.to(lowercase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCamelCase__ = scheduler.scale_model_input(lowercase__ , lowercase__ ) UpperCamelCase__ = model(lowercase__ , lowercase__ ) UpperCamelCase__ = scheduler.step(lowercase__ , lowercase__ , lowercase__ ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(lowercase__ ) ) UpperCamelCase__ = torch.mean(torch.abs(lowercase__ ) ) if str(lowercase__ ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3
244
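A minimal sketch of the sampling loop the scheduler tests above exercise, assuming the diffusers scheduler API (set_timesteps / scale_model_input / step). "KDPMaDiscreteScheduler" in this dump appears to correspond to diffusers' KDPM2DiscreteScheduler (the digit is mangled by the renaming), and the noise prediction below is a dummy stand-in for a real model call.

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # start from scaled noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # sigma-dependent input scaling
    noise_pred = 0.1 * model_input                        # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # one denoising step
print(sample.abs().mean())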
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[Any] = { 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class A_ ( _a ): '''simple docstring''' a__ = "pegasus" a__ = ["past_key_values"] a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__(self , lowercase__=50_265 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0 , lowercase__=False , lowercase__=0 , lowercase__=1 , lowercase__=1 , **lowercase__ , ) -> str: __UpperCAmelCase = vocab_size __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = d_model __UpperCAmelCase = encoder_ffn_dim __UpperCAmelCase = encoder_layers __UpperCAmelCase = encoder_attention_heads __UpperCAmelCase = decoder_ffn_dim __UpperCAmelCase = decoder_layers __UpperCAmelCase = decoder_attention_heads __UpperCAmelCase = dropout __UpperCAmelCase = attention_dropout __UpperCAmelCase = activation_dropout __UpperCAmelCase = activation_function __UpperCAmelCase = init_std __UpperCAmelCase = encoder_layerdrop __UpperCAmelCase = decoder_layerdrop __UpperCAmelCase = use_cache __UpperCAmelCase = encoder_layers __UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , ) @property def lowerCAmelCase_ (self ) -> int: return self.encoder_attention_heads @property def lowerCAmelCase_ (self ) -> int: return self.d_model
333
0
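One detail worth noting in the Pegasus config above, shown here assuming the transformers API: the attribute_map lets generic code read num_attention_heads and hidden_size even though Pegasus stores them as encoder_attention_heads and d_model.

from transformers import PegasusConfig

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
print(config.hidden_size)          # 512 — aliased onto d_model
print(config.num_attention_heads)  # 8  — aliased onto encoder_attention_heads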
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube.

    The cube root is rounded before cubing: a raw float root such as
    64 ** (1 / 3) == 3.9999999999999996 would otherwise fail the comparison.
    """
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
138
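Why perfect_cube rounds the cube root: float cube roots are inexact, so a naive identity check misclassifies true cubes. A quick demonstration:

n = 64                           # 4 ** 3
val = n ** (1 / 3)               # 3.9999999999999996, not 4.0
print((val * val * val) == n)    # False — the naive comparison fails
print(round(val) ** 3 == n)      # True — rounding recovers the integer root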
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( _a , unittest.TestCase ): '''simple docstring''' a__ = LongformerTokenizer a__ = True a__ = LongformerTokenizerFast a__ = True def lowerCAmelCase_ (self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __UpperCAmelCase = {'''unk_token''': '''<unk>'''} __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def lowerCAmelCase_ (self , **lowercase__ ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ ) -> Dict: __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = '''lower newer''' return input_text, output_text def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True) self.assertListEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokens + [tokenizer.unk_token] __UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = '''Encode this sequence.''' __UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase__ , lowercase__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) # Testing spaces after special tokens __UpperCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space __UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ ) __UpperCAmelCase = '''Encode <mask> sequence''' __UpperCAmelCase = '''Encode <mask>sequence''' __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: pass def lowerCAmelCase_ (self ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) __UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) # token_type_ids should put 0 everywhere 
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def lowerCAmelCase_ (self ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}''' __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
333
0
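A minimal sketch of the mask-token behaviour the tokenizer tests above pin down, assuming the transformers API (this downloads the checkpoint): with lstrip=True the space before <mask> is absorbed into the token itself, so only the text after <mask> decides whether the next byte-level BPE piece carries a leading Ġ (space) marker.

from transformers import AddedToken, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
tokenizer.add_special_tokens(
    {"mask_token": AddedToken("<mask>", lstrip=True, rstrip=False)}
)
print(tokenizer.tokenize("Encode <mask> sequence"))  # piece after <mask> starts with 'Ġ'
print(tokenizer.tokenize("Encode <mask>sequence"))   # piece after <mask> has no leading 'Ġ'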
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCamelCase__ : """simple docstring""" __a = 42 __a = 42 class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : List[str] = [[] for _ in range(lowercase__ )] __UpperCAmelCase : int = size def __getitem__( self : Union[str, Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' return iter(self._graph[vertex] ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._size def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : str ): '''simple docstring''' if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(lowercase__ , lowercase__ ) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : int = deque([start_vertex] ) __UpperCAmelCase : Any = [None] * self.size __UpperCAmelCase : Any = 0 while queue: __UpperCAmelCase : str = queue.popleft() __UpperCAmelCase : Optional[Any] = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: __UpperCAmelCase : int = current_distance + edge.weight __UpperCAmelCase : int = distances[edge.destination_vertex] if ( isinstance(lowercase__ , lowercase__ ) and new_distance >= dest_vertex_distance ): continue __UpperCAmelCase : Any = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
115
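The deque trick above generalizes; here is a compact, self-contained restatement of 0-1 BFS with plain adjacency lists (a hypothetical helper, same idea): weight-0 edges go to the front of the deque and weight-1 edges to the back, so vertices leave the deque in nondecreasing distance order.

from collections import deque

def zero_one_bfs(adj, start):
    """adj[u] = list of (v, w) edges with w in {0, 1}; returns distances from start."""
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)  # free edge: same distance layer
                else:
                    dq.append(v)      # unit edge: next layer
    return dist

print(zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0))  # [0, 0, 1]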
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ ( _a ): '''simple docstring''' a__ = (IPNDMScheduler,) a__ = (("num_inference_steps", 50),) def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple: __UpperCAmelCase = {'''num_train_timesteps''': 1_000} config.update(**lowercase__ ) return config def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config(**lowercase__ ) __UpperCAmelCase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[:] if time_step is None: __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ ) new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase_ (self ) -> List[str]: pass def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[:] if time_step is None: __UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) __UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residual (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = 
new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]: __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(**lowercase__ ) __UpperCAmelCase = scheduler_class(**lowercase__ ) __UpperCAmelCase = 10 __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(lowercase__ , lowercase__ ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(lowercase__ , lowercase__ ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample return sample def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ ) for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**lowercase__ ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ): scheduler.set_timesteps(lowercase__ ) elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ): __UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __UpperCAmelCase = dummy_past_residuals[:] __UpperCAmelCase = scheduler.timesteps[5] __UpperCAmelCase = scheduler.timesteps[6] __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample __UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase_ (self ) -> List[Any]: for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ ) def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = self.full_loop() __UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
333
0
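A minimal sketch of the save/load round-trip the scheduler tests above rely on, assuming the diffusers ConfigMixin API (the tests themselves call save_config and from_pretrained): a scheduler is fully described by its config JSON, so reloading must reproduce identical step() outputs.

import tempfile
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)                       # writes scheduler_config.json
    restored = IPNDMScheduler.from_pretrained(tmpdir)   # rebuilds from that JSON
print(restored.config.num_train_timesteps)              # 1000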
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase__ : Tuple = logging.get_logger(__name__) def lowerCAmelCase_ ( _lowerCamelCase: Dict , _lowerCamelCase: Union[str, Any] ): __SCREAMING_SNAKE_CASE : Dict = b.T __SCREAMING_SNAKE_CASE : Any = np.sum(np.square(_lowerCamelCase ) , axis=1 ) __SCREAMING_SNAKE_CASE : List[Any] = np.sum(np.square(_lowerCamelCase ) , axis=0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.matmul(_lowerCamelCase , _lowerCamelCase ) __SCREAMING_SNAKE_CASE : str = aa[:, None] - 2 * ab + ba[None, :] return d def lowerCAmelCase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: str ): __SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(-1 , 3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = squared_euclidean_distance(_lowerCamelCase , _lowerCamelCase ) return np.argmin(_lowerCamelCase , axis=1 ) class _UpperCamelCase ( _a ): '''simple docstring''' _A : Tuple = ['''pixel_values'''] def __init__( self : Tuple , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : List[str] = True , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : str = PILImageResampling.BILINEAR , lowerCAmelCase__ : List[str] = True , lowerCAmelCase__ : Any = True , **lowerCAmelCase__ : Optional[int] , ): """simple docstring""" super().__init__(**lowercase__ ) __SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6} __SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowercase__ ) __SCREAMING_SNAKE_CASE : int = np.array(lowercase__ ) if clusters is not None else None __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Optional[int] = size __SCREAMING_SNAKE_CASE : Optional[Any] = resample __SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize __SCREAMING_SNAKE_CASE : List[str] = do_color_quantize def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : Tuple , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(F"Size dictionary must contain both height and width keys. 
Got {size.keys()}" ) return resize( lowercase__ , size=(size["""height"""], size["""width"""]) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = rescale(image=lowercase__ , scale=1 / 1_27.5 , data_format=lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = image - 1 return image def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : Union[str, Any] = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE : str = size if size is not None else self.size __SCREAMING_SNAKE_CASE : int = get_size_dict(lowercase__ ) __SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE : List[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize __SCREAMING_SNAKE_CASE : List[Any] = clusters if clusters is not None else self.clusters __SCREAMING_SNAKE_CASE : int = np.array(lowercase__ ) __SCREAMING_SNAKE_CASE : Any = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_color_quantize and clusters is None: raise ValueError("""Clusters must be specified if do_color_quantize is True.""" ) # All transformations expect numpy arrays. __SCREAMING_SNAKE_CASE : Any = [to_numpy_array(lowercase__ ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE : Any = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE : List[str] = [self.normalize(image=lowercase__ ) for image in images] if do_color_quantize: __SCREAMING_SNAKE_CASE : Union[str, Any] = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) __SCREAMING_SNAKE_CASE : Dict = np.array(lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) __SCREAMING_SNAKE_CASE : List[Any] = images.shape[0] __SCREAMING_SNAKE_CASE : Dict = images.reshape(lowercase__ , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. __SCREAMING_SNAKE_CASE : Optional[Any] = list(lowercase__ ) else: __SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] __SCREAMING_SNAKE_CASE : int = {"""input_ids""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
112
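The color-quantization step above leans on one vectorized identity, sketched here assuming only NumPy: ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 yields every pixel-to-palette squared distance in a single matrix product, with no explicit loop.

import numpy as np

pixels = np.random.rand(6, 3)    # 6 RGB pixels (hypothetical values)
palette = np.random.rand(4, 3)   # 4 palette clusters

d = (np.sum(pixels**2, axis=1)[:, None]
     - 2 * pixels @ palette.T
     + np.sum(palette**2, axis=1)[None, :])  # (6, 4) squared distances
ids = np.argmin(d, axis=1)                   # nearest cluster per pixel
print(ids)                                   # e.g. [2 0 3 0 1 2]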
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : '''simple docstring''' def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int: __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = num_channels __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = num_labels __UpperCAmelCase = image_size __UpperCAmelCase = layer_depths __UpperCAmelCase = embed_dims def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ (self ) -> Optional[Any]: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int: __UpperCAmelCase = SwiftFormerModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]: __UpperCAmelCase = self.num_labels __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = model(lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = model(lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ (self ) -> Optional[int]: ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs() __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): '''simple docstring''' a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if 
is_torch_available() else () a__ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def lowerCAmelCase_ (self ) -> List[str]: __UpperCAmelCase = SwiftFormerModelTester(self ) __UpperCAmelCase = ConfigTester( self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def lowerCAmelCase_ (self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def lowerCAmelCase_ (self ) -> List[Any]: pass def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(lowercase__ ) __UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(lowercase__ ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase__ ) @slow def lowerCAmelCase_ (self ) -> Any: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def lowerCAmelCase_ (self ) -> List[str]: pass def lowerCAmelCase_ (self ) -> Union[str, Any]: def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ): __UpperCAmelCase = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = 8 self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowercase__ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: def _config_zero_init(lowercase__ 
): __UpperCAmelCase = copy.deepcopy(lowercase__ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowercase__ , lowercase__ , 1E-10 ) if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ): __UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) ) setattr(lowercase__ , lowercase__ , lowercase__ ) return configs_no_init __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = _config_zero_init(lowercase__ ) for model_class in self.all_model_classes: __UpperCAmelCase = model_class(config=lowercase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCAmelCase_ (self ) -> Optional[Any]: pass def __a ( ) -> Any: '''simple docstring''' __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ (self ) -> str: return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ ) __UpperCAmelCase = self.default_image_processor __UpperCAmelCase = prepare_img() __UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ ) # forward pass with torch.no_grad(): __UpperCAmelCase = model(**lowercase__ ) # verify the logits __UpperCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowercase__ ) __UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
333
0
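A minimal end-to-end version of the SwiftFormer integration test above, assuming the transformers API and network access to the MBZUAI/swiftformer-xs checkpoint; the image path is a hypothetical stand-in.

import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
image = Image.open("example.jpg")             # any RGB image (hypothetical path)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits           # shape (1, 1000)
print(logits.argmax(-1).item())               # predicted ImageNet class id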
from __future__ import annotations import math class snake_case_ : def __init__( self : Optional[int] , lowercase_ : Optional[int] ) -> None: lowercase__ : int = size # approximate the overall size of segment tree with given value lowercase__ : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update lowercase__ : Optional[int] = [0 for i in range(0 , 4 * size )] lowercase__ : str = [0 for i in range(0 , 4 * size )] # flag for lazy update def __UpperCamelCase ( self : Dict , lowercase_ : Tuple ) -> int: return idx * 2 def __UpperCamelCase ( self : int , lowercase_ : Optional[int] ) -> int: return idx * 2 + 1 def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Optional[int] ) -> None: if left_element == right_element: lowercase__ : List[Any] = a[left_element - 1] else: lowercase__ : Dict = (left_element + right_element) // 2 self.build(self.left(lowercase__ ) , lowercase__ , lowercase__ , lowercase__ ) self.build(self.right(lowercase__ ) , mid + 1 , lowercase__ , lowercase__ ) lowercase__ : Tuple = max( self.segment_tree[self.left(lowercase__ )] , self.segment_tree[self.right(lowercase__ )] ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any ) -> bool: if self.flag[idx] is True: lowercase__ : List[Any] = self.lazy[idx] lowercase__ : str = False if left_element != right_element: lowercase__ : Optional[int] = self.lazy[idx] lowercase__ : str = self.lazy[idx] lowercase__ : Optional[int] = True lowercase__ : Union[str, Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowercase__ : Any = val if left_element != right_element: lowercase__ : List[Any] = val lowercase__ : List[Any] = val lowercase__ : Union[str, Any] = True lowercase__ : str = True return True lowercase__ : Optional[Any] = (left_element + right_element) // 2 self.update(self.left(lowercase__ ) , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) self.update(self.right(lowercase__ ) , mid + 1 , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowercase__ : Optional[int] = max( self.segment_tree[self.left(lowercase__ )] , self.segment_tree[self.right(lowercase__ )] ) return True def __UpperCamelCase ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Tuple ) -> int | float: if self.flag[idx] is True: lowercase__ : Dict = self.lazy[idx] lowercase__ : List[str] = False if left_element != right_element: lowercase__ : Any = self.lazy[idx] lowercase__ : Any = self.lazy[idx] lowercase__ : Optional[Any] = True lowercase__ : List[Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowercase__ : List[Any] = (left_element + right_element) // 2 lowercase__ : Dict = self.query(self.left(lowercase__ ) , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowercase__ : Union[str, Any] = self.query(self.right(lowercase__ ) , mid + 1 , lowercase__ , lowercase__ , lowercase__ ) return max(lowercase__ , lowercase__ ) def __str__( self : Any ) -> str: return str([self.query(1 , 1 , self.size , lowercase__ , lowercase__ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": UpperCamelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] UpperCamelCase = 
15 UpperCamelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
87
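A compact, 0-indexed restatement of the lazy segment tree above (range assignment, range max), written as a hypothetical standalone class so the propagation logic is easier to follow; both operations run in O(log n).

import math

class LazySegTree:
    def __init__(self, a):
        self.n = len(a)
        self.tree = [0] * (4 * self.n)
        self.lazy = [None] * (4 * self.n)      # pending assignment, or None
        self._build(1, 0, self.n - 1, a)

    def _build(self, idx, lo, hi, a):
        if lo == hi:
            self.tree[idx] = a[lo]
            return
        mid = (lo + hi) // 2
        self._build(2 * idx, lo, mid, a)
        self._build(2 * idx + 1, mid + 1, hi, a)
        self.tree[idx] = max(self.tree[2 * idx], self.tree[2 * idx + 1])

    def _push(self, idx):
        if self.lazy[idx] is not None:         # propagate pending value to children
            for child in (2 * idx, 2 * idx + 1):
                self.tree[child] = self.lazy[idx]
                self.lazy[child] = self.lazy[idx]
            self.lazy[idx] = None

    def update(self, idx, lo, hi, a, b, val):  # assign val on [a, b]
        if b < lo or hi < a:
            return
        if a <= lo and hi <= b:
            self.tree[idx] = val
            self.lazy[idx] = val
            return
        self._push(idx)
        mid = (lo + hi) // 2
        self.update(2 * idx, lo, mid, a, b, val)
        self.update(2 * idx + 1, mid + 1, hi, a, b, val)
        self.tree[idx] = max(self.tree[2 * idx], self.tree[2 * idx + 1])

    def query(self, idx, lo, hi, a, b):        # max on [a, b]
        if b < lo or hi < a:
            return -math.inf
        if a <= lo and hi <= b:
            return self.tree[idx]
        self._push(idx)
        mid = (lo + hi) // 2
        return max(self.query(2 * idx, lo, mid, a, b),
                   self.query(2 * idx + 1, mid + 1, hi, a, b))

st = LazySegTree([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(st.query(1, 0, st.n - 1, 3, 5))          # max of elements 3..5 -> 7
st.update(1, 0, st.n - 1, 0, 2, 111)           # assign 111 to elements 0..2
print(st.query(1, 0, st.n - 1, 0, 14))         # -> 111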
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A_ : str = logging.get_logger(__name__) A_ : str = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Union[str, Any] = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A_ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) A_ : Dict = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), 
] ) A_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) A_ : Tuple = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) A_ : int = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) A_ : Tuple = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) A_ : Tuple = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) A_ : int = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) A_ : Tuple = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( 
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A_ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModel) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A_ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A_ : int = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class A_ ( _BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class A_ ( 
_BaseAutoModelClass ): '''simple docstring''' a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A_ : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
333
0
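A stripped-down sketch of what the mappings above feed into: an auto class resolves a config's model_type through the OrderedDict to pick an architecture name. This mirrors the lookup only; the real _LazyAutoMapping additionally defers the import of each class until it is requested.

from collections import OrderedDict

MODEL_MAPPING_NAMES = OrderedDict([("bert", "FlaxBertModel"), ("gpt2", "FlaxGPT2Model")])

def resolve_model_class_name(model_type: str) -> str:
    try:
        return MODEL_MAPPING_NAMES[model_type]  # the real mapping imports lazily here
    except KeyError:
        raise ValueError(f"Unrecognized model type: {model_type!r}")

print(resolve_model_class_name("bert"))  # FlaxBertModel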
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ : Dict ={ 'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : int =['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[Any] =[ 'BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[Any] =[ 'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : int =['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] =[ 'FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCAmelCase__ : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
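A minimal sketch (not from the source) of the optional-dependency guard this init file repeats for each backend: a backend's symbols are only registered in _import_structure when the backend is importable, so importing the package never hard-fails on a missing extra.

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {"configuration_bert": ["BertConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: simply skip registering the torch-only symbols
else:
    _import_structure["modeling_bert"] = ["BertModel"]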
257
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging A_ : Tuple = logging.get_logger(__name__) class A_ ( _a ): '''simple docstring''' a__ = "linear" a__ = "cosine" a__ = "cosine_with_restarts" a__ = "polynomial" a__ = "constant" a__ = "constant_with_warmup" a__ = "piecewise_constant" def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple: '''simple docstring''' return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) ) return 1.0 return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = {} __UpperCAmelCase = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: __UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' ) __UpperCAmelCase = int(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = float(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = value __UpperCAmelCase = float(rule_list[-1] ) def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): def rule_func(SCREAMING_SNAKE_CASE ) -> float: __UpperCAmelCase = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict: '''simple docstring''' def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 
* (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) ) return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]: '''simple docstring''' __UpperCAmelCase = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(SCREAMING_SNAKE_CASE ): if current_step < num_warmup_steps: return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __UpperCAmelCase = lr_init - lr_end __UpperCAmelCase = num_training_steps - num_warmup_steps __UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps __UpperCAmelCase = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , ) return schedule_func( SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
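Assuming this record is diffusers' optimization module (the definitions above are mangled), here is a small usage sketch of its public get_scheduler dispatcher with a toy optimizer:

import torch
from diffusers.optimization import get_scheduler

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=1e-3)
lr_scheduler = get_scheduler(
    "linear", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
)
for _ in range(100):
    optimizer.step()
    lr_scheduler.step()  # lr ramps 0 -> 1e-3 over 10 warmup steps, then decays linearly to 0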
333
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class __lowercase ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = """ZinengTang/tvlt-base""" lowerCamelCase = tempfile.mkdtemp() def __A ( self , **A ) -> Optional[int]: '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **lowercase__ ) def __A ( self , **A ) -> List[str]: '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowercase__ ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_feature_extractor() lowerCamelCase = TvltProcessor(image_processor=lowercase__ , feature_extractor=lowercase__ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , lowercase__ ) self.assertIsInstance(processor.image_processor , lowercase__ ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_feature_extractor() lowerCamelCase = TvltProcessor(image_processor=lowercase__ , feature_extractor=lowercase__ ) lowerCamelCase = np.ones([1_20_00] ) lowerCamelCase = feature_extractor(lowercase__ , return_tensors="""np""" ) lowerCamelCase = processor(audio=lowercase__ , return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self ) -> Any: '''simple docstring''' lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_feature_extractor() lowerCamelCase = TvltProcessor(image_processor=lowercase__ , feature_extractor=lowercase__ ) lowerCamelCase = np.ones([3, 2_24, 2_24] ) lowerCamelCase = image_processor(lowercase__ , return_tensors="""np""" ) lowerCamelCase = processor(images=lowercase__ , return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self ) -> List[str]: '''simple docstring''' lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_feature_extractor() lowerCamelCase = TvltProcessor(image_processor=lowercase__ , feature_extractor=lowercase__ ) lowerCamelCase = np.ones([1_20_00] ) lowerCamelCase = np.ones([3, 2_24, 2_24] ) lowerCamelCase = processor(audio=lowercase__ , images=lowercase__ ) self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowercase__ ): processor() def __A ( self ) -> List[str]: '''simple docstring''' lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_feature_extractor() lowerCamelCase = TvltProcessor(image_processor=lowercase__ , feature_extractor=lowercase__ ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input 
names do not match""" , )
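A hedged sketch of what the processor composition under test does end to end; the checkpoint id comes from the test itself and the input shapes mirror its dummy data:

import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']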
252
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Evaluate at x0 the polynomial interpolating the points
    (x_points[i], y_points[i]) via Neville's iterated interpolation.
    Returns [value_at_x0, full_neville_table]."""
    n = len(x_points)
    # q[j][i] holds the value at x0 of the polynomial through points j-i+1 .. j
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
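A worked example under the cleaned-up names above. The sample data is exactly quadratic, so Neville's scheme reproduces f(5) = 25:

value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
print(value)  # 25.0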
333
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase : int = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
233
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy 2-approximation of minimum vertex cover via maximal matching."""
    chosen_vertices = set()
    # edges = set of the graph's edges as (from_node, to_node) pairs
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both endpoints to chosen_vertices, then
    # remove all edges incident to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the edge set of an adjacency-list graph as (from, to) tuples."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
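A worked example using the graph from the trailing comment. The exact cover depends on set pop order, but every returned set covers all edges and is at most twice the optimum:

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u in graph for v in graph[u])
print(cover)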
333
0
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCAmelCase__ ( _a ): """simple docstring""" a = 42 class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[str]=("DownEncoderBlock2D",) , __lowerCamelCase : Optional[Any]=(64,) , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[str]="silu" , __lowerCamelCase : Union[str, Any]=True , ) -> Optional[Any]: super().__init__() SCREAMING_SNAKE_CASE__ = layers_per_block SCREAMING_SNAKE_CASE__ = torch.nn.Convad( lowercase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE__ = block_out_channels[0] for i, down_block_type in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE__ = output_channel SCREAMING_SNAKE_CASE__ = block_out_channels[i] SCREAMING_SNAKE_CASE__ = i == len(lowercase__ ) - 1 SCREAMING_SNAKE_CASE__ = get_down_block( lowercase__ , num_layers=self.layers_per_block , in_channels=lowercase__ , out_channels=lowercase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , ) self.down_blocks.append(lowercase__ ) # mid SCREAMING_SNAKE_CASE__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , ) # out SCREAMING_SNAKE_CASE__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase__ , eps=1e-6 ) SCREAMING_SNAKE_CASE__ = nn.SiLU() SCREAMING_SNAKE_CASE__ = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE__ = nn.Convad(block_out_channels[-1] , lowercase__ , 3 , padding=1 ) SCREAMING_SNAKE_CASE__ = False def lowercase_ ( self : Any , __lowerCamelCase : int ) -> Any: SCREAMING_SNAKE_CASE__ = x SCREAMING_SNAKE_CASE__ = self.conv_in(lowercase__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(__lowerCamelCase : Dict ): def custom_forward(*__lowerCamelCase : Dict ): return module(*lowercase__ ) return custom_forward # down if is_torch_version('''>=''' , '''1.11.0''' ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase__ ) , lowercase__ , use_reentrant=lowercase__ ) # middle SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase__ , use_reentrant=lowercase__ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ ) # middle SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase__ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE__ = down_block(lowercase__ ) # middle SCREAMING_SNAKE_CASE__ = self.mid_block(lowercase__ ) # post-process SCREAMING_SNAKE_CASE__ = self.conv_norm_out(lowercase__ ) 
SCREAMING_SNAKE_CASE__ = self.conv_act(lowercase__ ) SCREAMING_SNAKE_CASE__ = self.conv_out(lowercase__ ) return sample class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , __lowerCamelCase : int=3 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Any=("UpDecoderBlock2D",) , __lowerCamelCase : Any=(64,) , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : List[str]="silu" , __lowerCamelCase : Optional[int]="group" , ) -> Optional[Any]: super().__init__() SCREAMING_SNAKE_CASE__ = layers_per_block SCREAMING_SNAKE_CASE__ = nn.Convad( lowercase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = nn.ModuleList([] ) SCREAMING_SNAKE_CASE__ = in_channels if norm_type == '''spatial''' else None # mid SCREAMING_SNAKE_CASE__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , ) # up SCREAMING_SNAKE_CASE__ = list(reversed(lowercase__ ) ) SCREAMING_SNAKE_CASE__ = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE__ = output_channel SCREAMING_SNAKE_CASE__ = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE__ = i == len(lowercase__ ) - 1 SCREAMING_SNAKE_CASE__ = get_up_block( lowercase__ , num_layers=self.layers_per_block + 1 , in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , resnet_time_scale_shift=lowercase__ , ) self.up_blocks.append(lowercase__ ) SCREAMING_SNAKE_CASE__ = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE__ = SpatialNorm(block_out_channels[0] , lowercase__ ) else: SCREAMING_SNAKE_CASE__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase__ , eps=1e-6 ) SCREAMING_SNAKE_CASE__ = nn.SiLU() SCREAMING_SNAKE_CASE__ = nn.Convad(block_out_channels[0] , lowercase__ , 3 , padding=1 ) SCREAMING_SNAKE_CASE__ = False def lowercase_ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any]=None ) -> int: SCREAMING_SNAKE_CASE__ = z SCREAMING_SNAKE_CASE__ = self.conv_in(lowercase__ ) SCREAMING_SNAKE_CASE__ = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__lowerCamelCase : Any ): def custom_forward(*__lowerCamelCase : List[Any] ): return module(*lowercase__ ) return custom_forward if is_torch_version('''>=''' , '''1.11.0''' ): # middle SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ ) SCREAMING_SNAKE_CASE__ = sample.to(lowercase__ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ ) else: # middle SCREAMING_SNAKE_CASE__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ = sample.to(lowercase__ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE__ = 
torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ ) else: # middle SCREAMING_SNAKE_CASE__ = self.mid_block(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ = sample.to(lowercase__ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE__ = up_block(lowercase__ , lowercase__ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE__ = self.conv_norm_out(lowercase__ ) else: SCREAMING_SNAKE_CASE__ = self.conv_norm_out(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ = self.conv_act(lowercase__ ) SCREAMING_SNAKE_CASE__ = self.conv_out(lowercase__ ) return sample class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , __lowerCamelCase : str="random" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[Any]=True ) -> Tuple: super().__init__() SCREAMING_SNAKE_CASE__ = n_e SCREAMING_SNAKE_CASE__ = vq_embed_dim SCREAMING_SNAKE_CASE__ = beta SCREAMING_SNAKE_CASE__ = legacy SCREAMING_SNAKE_CASE__ = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE__ = remap if self.remap is not None: self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE__ = self.used.shape[0] SCREAMING_SNAKE_CASE__ = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE__ = self.re_embed SCREAMING_SNAKE_CASE__ = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE__ = n_e SCREAMING_SNAKE_CASE__ = sane_index_shape def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ = inds.shape assert len(lowercase__ ) > 1 SCREAMING_SNAKE_CASE__ = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE__ = self.used.to(lowercase__ ) SCREAMING_SNAKE_CASE__ = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE__ = match.argmax(-1 ) SCREAMING_SNAKE_CASE__ = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE__ = self.unknown_index return new.reshape(lowercase__ ) def lowercase_ ( self : Dict , __lowerCamelCase : Tuple ) -> Any: SCREAMING_SNAKE_CASE__ = inds.shape assert len(lowercase__ ) > 1 SCREAMING_SNAKE_CASE__ = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE__ = self.used.to(lowercase__ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE__ = 0 # simply set to zero SCREAMING_SNAKE_CASE__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase__ ) return back.reshape(lowercase__ ) def lowercase_ ( self : List[Any] , __lowerCamelCase : str ) -> Any: # reshape z -> (batch, height, width, channel) and flatten SCREAMING_SNAKE_CASE__ = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE__ = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE__ = torch.argmin(torch.cdist(lowercase__ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE__ = self.embedding(lowercase__ ).view(z.shape ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE__ = self.beta * 
torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE__ = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE__ = self.remap_to_used(lowercase__ ) SCREAMING_SNAKE_CASE__ = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowercase_ ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ) -> Dict: # shape specifying (batch, height, width, channel) if self.remap is not None: SCREAMING_SNAKE_CASE__ = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE__ = self.unmap_to_all(lowercase__ ) SCREAMING_SNAKE_CASE__ = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE__ = self.embedding(lowercase__ ) if shape is not None: SCREAMING_SNAKE_CASE__ = z_q.view(lowercase__ ) # reshape back to match original input shape SCREAMING_SNAKE_CASE__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCAmelCase__ ( _a ): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=False ) -> Tuple: SCREAMING_SNAKE_CASE__ = parameters SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = torch.chunk(lowercase__ , 2 , dim=1 ) SCREAMING_SNAKE_CASE__ = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE__ = deterministic SCREAMING_SNAKE_CASE__ = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE__ = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowercase_ ( self : List[str] , __lowerCamelCase : Union[str, Any] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype SCREAMING_SNAKE_CASE__ = randn_tensor( self.mean.shape , generator=lowercase__ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE__ = self.mean + self.std * sample return x def lowercase_ ( self : List[str] , __lowerCamelCase : Dict=None ) -> Any: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple=[1, 2, 3] ) -> Optional[Any]: if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE__ = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase__ ) def lowercase_ ( self : Optional[Any] ) -> List[str]: return self.mean
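A minimal, standalone sketch (not from the source) of the two numerical ideas inside the DiagonalGaussianDistribution class above: the reparameterized sample mean + std * eps, and its closed-form KL divergence against a unit Gaussian:

import torch

mean = torch.zeros(4)
logvar = torch.zeros(4)  # unit variance, purely for illustration
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(std)  # reparameterization trick
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar)  # KL(N(mean, var) || N(0, I))
print(sample.shape, kl.item())  # torch.Size([4]) 0.0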
314
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Depth-first, post-order traversal: each node is appended after all of
    its children, and unvisited vertices are then picked up in turn."""
    current = start
    # mark current as visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if a neighbor has not been visited, visit it first
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # once all neighbors are handled, append current to the ordering
    sort.append(current)
    # if some vertices are still unvisited, restart from one of them
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
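A quick sanity check on the ordering produced above: this variant appends each node after its children, so every child must precede its parent in the result:

order = topological_sort("a", [], [])
position = {node: i for i, node in enumerate(order)}
assert all(position[child] < position[parent] for parent in edges for child in edges[parent])
print(order)  # ['c', 'd', 'e', 'b', 'a']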
333
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input A_ = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine' def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = _ask_options( """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _snake_case : Dict = get_sagemaker_input() else: _snake_case : Optional[Any] = get_cluster_input() return config def UpperCAmelCase__ (snake_case__ : Optional[Any]=None ): """simple docstring""" if subparsers is not None: _snake_case : Dict = subparsers.add_parser("""config""" , description=snake_case__ ) else: _snake_case : Tuple = argparse.ArgumentParser("""Accelerate config command""" , description=snake_case__ ) parser.add_argument( """--config_file""" , default=snake_case__ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """ """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """ """with \'huggingface\'.""" ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : List[Any] = get_user_input() if args.config_file is not None: _snake_case : str = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) _snake_case : Union[str, Any] = default_yaml_config_file if config_file.endswith(""".json""" ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F"accelerate configuration saved at {config_file}" ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = config_command_parser() _snake_case : Tuple = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
64
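A hedged sketch of how the parser factory in the accelerate config module above is typically mounted on a parent CLI, using accelerate's conventional names for the mangled functions; note config_command is interactive, so running this prompts the user:

import argparse

root = argparse.ArgumentParser(prog="accelerate")
subparsers = root.add_subparsers()
config_command_parser(subparsers)  # registers the `config` subcommand and sets args.func
args = root.parse_args(["config", "--config_file", "default_config.yaml"])
args.func(args)  # dispatches to config_command(args)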
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ : int = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
333
0
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase ): UpperCamelCase =LongformerTokenizer UpperCamelCase =True UpperCamelCase =LongformerTokenizerFast UpperCamelCase =True def _lowerCamelCase ( self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase : List[Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowercase : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) __lowercase : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowercase : Optional[Any] = {'''unk_token''': '''<unk>'''} __lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : str = '''lower newer''' __lowercase : int = '''lower newer''' return input_text, output_text def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowercase : Optional[int] = '''lower newer''' __lowercase : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowercase : Any = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True) self.assertListEqual(lowercase__ , lowercase__ ) __lowercase : Any = tokens + [tokenizer.unk_token] __lowercase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) def _lowerCamelCase ( self ) -> int: __lowercase : Dict = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _lowerCamelCase ( self ) -> int: __lowercase : List[Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __lowercase : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ ) __lowercase : Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ ) __lowercase : int = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __lowercase : Any = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowerCamelCase ( self ) -> Any: __lowercase : List[str] = self.get_tokenizer() __lowercase : Optional[int] = '''Encode this sequence.''' __lowercase : Any = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowercase : int = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __lowercase : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) __lowercase : Optional[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __lowercase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase__ , lowercase__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowercase : str = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowercase : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) # Testing spaces after special tokens __lowercase : Optional[Any] = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space __lowercase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase__ ) __lowercase : Tuple = '''Encode <mask> sequence''' __lowercase : int = '''Encode <mask>sequence''' __lowercase : Dict = tokenizer.encode(lowercase__ ) __lowercase : List[Any] = encoded.index(lowercase__ ) __lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase__ , lowercase__ ) __lowercase : Any = tokenizer.encode(lowercase__ ) __lowercase : int = encoded.index(lowercase__ ) __lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) def _lowerCamelCase ( self ) -> Tuple: pass def _lowerCamelCase ( self ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowercase : str = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowercase : int = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowercase : Any = '''A, <mask> AllenNLP sentence.''' __lowercase : int = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) __lowercase : Union[str, Any] = tokenizer_p.encode_plus(lowercase__ , 
add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __lowercase : int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowercase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _lowerCamelCase ( self ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowercase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ ) def _lowerCamelCase ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowercase : str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowercase : List[Any] = F"""{text_of_1_token} {text_of_1_token}""" __lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : Tuple = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : Dict = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : List[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : str = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase 
: Any = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : Any = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : List[Any] = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowercase : Tuple = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : Tuple = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : List[str] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __lowercase : Optional[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
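A hedged illustration of the first offset case the test above asserts (checkpoint id taken from the test): with add_prefix_space=True and trim_offsets=True, the injected leading space is excluded from each token's character span:

from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (6, 11)]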
249
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: '''simple docstring''' model.train() __UpperCAmelCase = model(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' set_seed(4_2 ) __UpperCAmelCase = RegressionModel() __UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) __UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) # Make a copy of `model` if sched: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' # Test when on a single CPU or GPU that the context manager does nothing __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" 
(noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' # Test on distributed setup that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) # Use a single batch __UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = 
input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = Accelerator( split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCAmelCase , __UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(SCREAMING_SNAKE_CASE ): step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' __UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def __a ( ) -> str: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = 
RegressionDataset(length=8_0 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase = RegressionDataset(length=9_6 ) __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if iteration < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE ) if batch_num < len(SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase = Accelerator() __UpperCAmelCase = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
333
0
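# A minimal illustrative sketch of the training-loop pattern the gradient
# accumulation tests above exercise: `accelerator.accumulate` defers gradient
# synchronization until every `gradient_accumulation_steps`-th step. The tiny
# model and data below are invented placeholders, not part of the test suite.
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataset = torch.utils.data.TensorDataset(torch.randn(16, 1), torch.randn(16, 1))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):  # skips cross-process grad sync on accumulation steps
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()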
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # Pad with one extra element on the main process so that ranks hold
    # differently sized tensors and padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
244
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
333
0
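# A hedged, single-process sketch of the collective operations tested above;
# with one process, gather/broadcast/reduce behave as (near-)identity
# operations, so this mostly documents the call signatures used by the test.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, reduce

state = PartialState()
tensor = (torch.arange(state.num_processes) + 1.0).to(state.device)
print(gather(tensor).tolist())         # concatenation across processes
print(broadcast(tensor).tolist())      # every rank receives the main copy
print(reduce(tensor, "sum").tolist())  # element-wise sum across processes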
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
138
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
0
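# Quick sanity checks for the two files above, using the de-obfuscated names
# from the rewrites (mincost_tickets, encrypt, decrypt): the classic
# LeetCode-983 instance costs 11, and the rail fence cipher round-trips.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25]) == 11
assert decrypt(encrypt("WEAREDISCOVEREDFLEEATONCE", 3), 3) == "WEAREDISCOVEREDFLEEATONCE"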
"""simple docstring""" import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def lowerCamelCase ( _UpperCamelCase : Dict ) -> List[Any]: '''simple docstring''' if hor == 1_2_8: __UpperCAmelCase : Optional[int] = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") __UpperCAmelCase : Optional[Any] = (3_2, 1_2_8, 2_5_6) __UpperCAmelCase : int = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 3_2: __UpperCAmelCase : Tuple = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") __UpperCAmelCase : Union[str, Any] = (3_2, 6_4, 1_2_8, 2_5_6) __UpperCAmelCase : str = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") __UpperCAmelCase : str = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) __UpperCAmelCase : Tuple = model.state_dict() __UpperCAmelCase : str = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 1_4, """out_channels""": 1_4, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5_5_3_6, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } __UpperCAmelCase : List[Any] = UNetaDModel(**_UpperCamelCase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) __UpperCAmelCase : Any = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __UpperCAmelCase : Dict = state_dict.pop(_UpperCamelCase ) hf_value_function.load_state_dict(_UpperCamelCase ) torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(_UpperCamelCase , _UpperCamelCase ) def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = { """in_channels""": 1_4, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (3_2, 6_4, 1_2_8, 2_5_6), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5_5_3_6, """out_channels""": 1_4, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } __UpperCAmelCase : Any = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) __UpperCAmelCase : List[str] = model __UpperCAmelCase : Optional[Any] = UNetaDModel(**_UpperCamelCase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) 
__UpperCAmelCase : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __UpperCAmelCase : Union[str, Any] = state_dict.pop(_UpperCamelCase ) hf_value_function.load_state_dict(_UpperCamelCase ) torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
115
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A_ ( _a , _a , _a , unittest.TestCase ): '''simple docstring''' a__ = StableUnCLIPPipeline a__ = TEXT_TO_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_BATCH_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ = False def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': 
prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]: if str(lowercase__ ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(lowercase__ ) else: __UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ ) def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowercase__ ) @slow @require_torch_gpu class A_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ (self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=lowercase__ , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
333
0
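# The conversion script above remaps checkpoints with dict(zip(src_keys,
# dst_keys)), which silently assumes both state dicts enumerate parameters in
# the same order. A toy demonstration of that idiom with invented modules:
import torch

src = torch.nn.Linear(4, 4)  # stands in for the loaded checkpoint
dst = torch.nn.Linear(4, 4)  # stands in for the freshly built model

mapping = dict(zip(src.state_dict().keys(), dst.state_dict().keys()))
dst.load_state_dict({mapping[k]: v for k, v in src.state_dict().items()})
assert torch.equal(src.weight, dst.weight)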
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _UpperCamelCase ( _a , unittest.TestCase ): '''simple docstring''' _A : List[str] = ShapEPipeline _A : int = ['''prompt'''] _A : int = ['''prompt'''] _A : Union[str, Any] = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] _A : int = False @property def UpperCamelCase__ ( self : Optional[int] ): """simple docstring""" return 3_2 @property def UpperCamelCase__ ( self : Any ): """simple docstring""" return 3_2 @property def UpperCamelCase__ ( self : Optional[Any] ): """simple docstring""" return self.time_input_dim * 4 @property def UpperCamelCase__ ( self : Dict ): """simple docstring""" return 8 @property def UpperCamelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def UpperCamelCase__ ( self : str ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(lowercase__ ) @property def UpperCamelCase__ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = { """num_attention_heads""": 2, """attention_head_dim""": 1_6, """embedding_dim""": self.time_input_dim, """num_embeddings""": 3_2, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } __SCREAMING_SNAKE_CASE : int = PriorTransformer(**lowercase__ ) return model @property def UpperCamelCase__ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Tuple = { """param_shapes""": ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 1_2, """background""": ( 0.1, 0.1, 0.1, ), } __SCREAMING_SNAKE_CASE : List[str] = ShapERenderer(**lowercase__ ) return model def UpperCamelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.dummy_prior __SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder __SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_tokenizer __SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_renderer __SCREAMING_SNAKE_CASE : str = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=lowercase__ , clip_sample=lowercase__ , clip_sample_range=1.0 , ) 
__SCREAMING_SNAKE_CASE : List[Any] = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : str=0 ): """simple docstring""" if str(lowercase__ ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(lowercase__ ) else: __SCREAMING_SNAKE_CASE : Any = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __SCREAMING_SNAKE_CASE : Dict = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 3_2, """output_type""": """np""", } return inputs def UpperCamelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = """cpu""" __SCREAMING_SNAKE_CASE : int = self.get_dummy_components() __SCREAMING_SNAKE_CASE : str = self.pipeline_class(**lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase__ ) ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] __SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) __SCREAMING_SNAKE_CASE : List[str] = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self : Union[str, Any] ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = torch_device == """cpu""" __SCREAMING_SNAKE_CASE : List[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowercase__ , relax_max_difference=lowercase__ , ) def UpperCamelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Optional[Any] = self.pipeline_class(**lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = 1 __SCREAMING_SNAKE_CASE : Tuple = 2 __SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(lowercase__ ) for key in inputs.keys(): if key in self.batch_params: __SCREAMING_SNAKE_CASE : List[Any] = batch_size * [inputs[key]] __SCREAMING_SNAKE_CASE : Dict = pipe(**lowercase__ , num_images_per_prompt=lowercase__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) __SCREAMING_SNAKE_CASE : int = torch.Generator(device=lowercase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = pipe( """a shark""" , generator=lowercase__ , 
guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ )
112
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ : int = logging.get_logger(__name__) A_ : str = {'tokenizer_file': 'tokenizer.json'} A_ : List[str] = { 'tokenizer_file': { 'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json', 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json', }, } class A_ ( _a ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ["input_ids", "attention_mask"] a__ = None def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Dict: super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: __UpperCAmelCase = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) __UpperCAmelCase = add_prefix_space __UpperCAmelCase = pre_tok_class(**lowercase__ ) __UpperCAmelCase = add_prefix_space def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding: __UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._batch_encode_plus(*lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding: __UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._encode_plus(*lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]: __UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def lowerCAmelCase_ (self , lowercase__ ) -> List[int]: __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] return input_ids
333
0
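# A hedged round-trip with the fast Bloom tokenizer defined above; the class
# ships in transformers as BloomTokenizerFast, and running this requires
# network access to fetch tokenizer.json from the Hub.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
ids = tok("Hello world", add_special_tokens=False)["input_ids"]
assert tok.decode(ids) == "Hello world"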
class snake_case_ : def __init__( self : Any , lowercase_ : List[Any] = "" , lowercase_ : Tuple = False ) -> None: # Mapping from the first character of the prefix of the node lowercase__ : Union[str, Any] = {} # A node will be a leaf if the tree contains its word lowercase__ : Any = is_leaf lowercase__ : Tuple = prefix def __UpperCamelCase ( self : Dict , lowercase_ : Any ) -> tuple[str, str, str]: lowercase__ : int = 0 for q, w in zip(self.prefix , lowercase__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def __UpperCamelCase ( self : Tuple , lowercase_ : int ) -> None: for word in words: self.insert(lowercase__ ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[int] ) -> None: # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: lowercase__ : Optional[int] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowercase__ : List[Any] = RadixNode(prefix=lowercase__ , is_leaf=lowercase__ ) else: lowercase__ : Dict = self.nodes[word[0]] lowercase__ , lowercase__ , lowercase__ : Any = incoming_node.match( lowercase__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowercase__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowercase__ : Dict = remaining_prefix lowercase__ : Optional[Any] = self.nodes[matching_string[0]] lowercase__ : Union[str, Any] = RadixNode(lowercase__ , lowercase__ ) lowercase__ : int = aux_node if remaining_word == "": lowercase__ : int = True else: self.nodes[matching_string[0]].insert(lowercase__ ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str ) -> bool: lowercase__ : List[str] = self.nodes.get(word[0] , lowercase__ ) if not incoming_node: return False else: lowercase__ , lowercase__ , lowercase__ : str = incoming_node.match( lowercase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowercase__ ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> bool: lowercase__ : List[Any] = self.nodes.get(word[0] , lowercase__ ) if not incoming_node: return False else: lowercase__ , lowercase__ , lowercase__ : Optional[int] = incoming_node.match( lowercase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowercase__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowercase__ : Dict = list(self.nodes.values() )[0] lowercase__ : int = merging_node.is_leaf self.prefix += merging_node.prefix lowercase__ : List[str] = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif 
len(incoming_node.nodes ) > 1: lowercase__ : List[Any] = False # If there is 1 edge, we merge it with its child else: lowercase__ : int = list(incoming_node.nodes.values() )[0] lowercase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowercase__ : Union[str, Any] = merging_node.nodes return True def __UpperCamelCase ( self : str , lowercase_ : Dict = 0 ) -> None: if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def lowercase_ ( ): lowercase__ : Any = "banana bananas bandana band apple all beast".split() lowercase__ : Optional[int] = RadixNode() root.insert_many(_lowerCamelCase) assert all(root.find(_lowerCamelCase) for word in words) assert not root.find("bandanas") assert not root.find("apps") root.delete("all") assert not root.find("all") root.delete("banana") assert not root.find("banana") assert root.find("bananas") return True def lowercase_ ( ): assert test_trie() def lowercase_ ( ): lowercase__ : Optional[Any] = RadixNode() lowercase__ : Union[str, Any] = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(_lowerCamelCase) print("Words:" , _lowerCamelCase) print("Tree:") root.print_tree() if __name__ == "__main__": main()
87
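# A hypothetical session with the radix trie above, assuming its methods keep
# the names its own self-test calls (insert_many, find, delete); in the
# obfuscated dump the class and every method carry placeholder names, so this
# sketch only runs against a de-obfuscated copy of the class.
root = RadixNode()
root.insert_many("banana bananas bandana band apple all beast".split())
assert root.find("banana") and not root.find("apps")
root.delete("banana")
assert not root.find("banana") and root.find("bananas")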
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
0
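# Sanity checks for the minimum-perfect-squares function above, using the name
# from the rewrite: 12 = 4 + 4 + 4 and 13 = 4 + 9.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2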
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # A, B and C are collinear iff AB x AC is the zero vector (up to rounding).
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
334
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class a_ : """simple docstring""" def __init__( self : Optional[int] ,snake_case : Any ,snake_case : Dict=100 ,snake_case : List[Any]=13 ,snake_case : str=30 ,snake_case : List[str]=2 ,snake_case : List[Any]=3 ,snake_case : Tuple=True ,snake_case : Optional[Any]=True ,snake_case : int=32 ,snake_case : Tuple=4 ,snake_case : List[Any]=4 ,snake_case : Optional[Any]=37 ,snake_case : Optional[Any]="gelu" ,snake_case : Tuple=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : List[Any]=10 ,snake_case : Tuple=0.02 ,snake_case : List[str]=3 ,snake_case : Any=None ,snake_case : int=[0, 1, 2, 3] ,): SCREAMING_SNAKE_CASE =parent SCREAMING_SNAKE_CASE =100 SCREAMING_SNAKE_CASE =batch_size SCREAMING_SNAKE_CASE =image_size SCREAMING_SNAKE_CASE =patch_size SCREAMING_SNAKE_CASE =num_channels SCREAMING_SNAKE_CASE =is_training SCREAMING_SNAKE_CASE =use_labels SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =type_sequence_label_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =scope SCREAMING_SNAKE_CASE =out_indices SCREAMING_SNAKE_CASE =num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE =num_patches + 1 def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None if self.use_labels: SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) SCREAMING_SNAKE_CASE =self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self : Dict ): return BeitConfig( vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,) def 
_lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ): SCREAMING_SNAKE_CASE =BeitModel(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ): SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ): SCREAMING_SNAKE_CASE =self.type_sequence_label_size SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE =1 SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ): SCREAMING_SNAKE_CASE =self.num_labels SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __UpperCAmelCase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =BeitModelTester(self ) SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 ) def _lowerCAmelCase ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def _lowerCAmelCase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def 
_lowerCAmelCase ( self : Union[str, Any] ): pass def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) SCREAMING_SNAKE_CASE =model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) ) def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(snake_case ) SCREAMING_SNAKE_CASE =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE =['pixel_values'] self.assertListEqual(arg_names[:1] ,snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case ) def _lowerCAmelCase ( self : Any ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]: continue SCREAMING_SNAKE_CASE =model_class(snake_case ) model.to(snake_case ) model.train() SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case ) SCREAMING_SNAKE_CASE =model(**snake_case ).loss loss.backward() def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE =False SCREAMING_SNAKE_CASE =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue SCREAMING_SNAKE_CASE =model_class(snake_case ) model.gradient_checkpointing_enable() model.to(snake_case ) model.train() SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case ) SCREAMING_SNAKE_CASE =model(**snake_case ).loss loss.backward() def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE =_config_zero_init(snake_case ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(config=snake_case ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if 
param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,) @slow def _lowerCAmelCase ( self : List[str] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Tuple ): return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case ) # prepare bool_masked_pos SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) ) @slow def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 1000) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case ) self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) ) SCREAMING_SNAKE_CASE =281 self.assertEqual(logits.argmax(-1 ).item() ,snake_case ) @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 21841) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case ) self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) ) SCREAMING_SNAKE_CASE =2396 self.assertEqual(logits.argmax(-1 ).item() ,snake_case ) @slow def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) 
SCREAMING_SNAKE_CASE =model.to(snake_case ) SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case ) SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' ) SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: SCREAMING_SNAKE_CASE =torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] ,device=snake_case ,) else: SCREAMING_SNAKE_CASE =torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] ,device=snake_case ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) ) @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) SCREAMING_SNAKE_CASE =model.to(snake_case ) SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case ) SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' ) SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] ) SCREAMING_SNAKE_CASE =torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape ,snake_case ) SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ) SCREAMING_SNAKE_CASE =torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape ,snake_case )
334
1
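# A minimal sketch of the single-image classification path the BEiT
# integration tests above exercise; downloads pretrained weights, and the
# image path is the COCO fixture the tests themselves load.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # class id 281 per the test above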
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
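# --- Usage sketch (illustrative addition, not part of the original file). ---
# Assumes IIRFilter from audio_filters.iir_filter exposes a per-sample
# `process()` method, as in the accompanying module.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    # Feed one second of a 5 kHz sine through the 1 kHz low-pass filter;
    # the output should be strongly attenuated once the filter settles.
    samples = [sin(tau * 5_000 * n / 48_000) for n in range(48_000)]
    filtered = [lowpass.process(sample) for sample in samples]
    print(max(abs(s) for s in filtered[1_000:]))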
334
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger() @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def _lowerCAmelCase ( self : List[Any] ,snake_case : Dict ,snake_case : Tensor ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self : List[str] ,snake_case : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _lowerCAmelCase ( self : Optional[Any] ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 1 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = True def __call__( self : str ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch: raise Exception( f'Numbers of operations are different. Source module has {len(snake_case )} operations while' f' destination module has {len(snake_case )}.' 
) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) class a_ ( nn.Module ): """simple docstring""" def __init__( self : Any ,snake_case : nn.Module ): super().__init__() SCREAMING_SNAKE_CASE =[] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), f'Unexpected layer name {k}' SCREAMING_SNAKE_CASE =len(snake_case ) + 1 feature_blocks.append((f'res{block_index}', v) ) SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case ) def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ): return get_trunk_forward_outputs( snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,) class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ): SCREAMING_SNAKE_CASE =x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Optional[Any] ,snake_case : str ): # default to timm! if x not in self: SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case ) SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) ) else: SCREAMING_SNAKE_CASE =super().__getitem__(snake_case ) return val class a_ ( lowerCamelCase_ ): """simple docstring""" def __getitem__( self : int ,snake_case : str ): if "seer" in x and "in1k" not in x: SCREAMING_SNAKE_CASE =RegNetModel else: SCREAMING_SNAKE_CASE =RegNetForImageClassification return val def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" for from_key, to_key in keys: SCREAMING_SNAKE_CASE =from_state_dict[from_key].clone() print(F'Copied key={from_key} to={to_key}' ) return to_state_dict def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True, ): """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func() SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) ) module_transfer(lowerCAmelCase_ ) if from_state_dict is not None: SCREAMING_SNAKE_CASE =[] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ ) our_model.load_state_dict(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =( our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state ) SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1] assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384 # we can use the convnext one SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, ) print(F'Pushed {name}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE =1000 SCREAMING_SNAKE_CASE =(1, num_labels) SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =json.load(open(cached_download(hf_hub_url(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ) ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 
1512], groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } SCREAMING_SNAKE_CASE =NameToOurModelFuncMap() SCREAMING_SNAKE_CASE =NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowerCAmelCase_, lowerCAmelCase_ ) -> Tuple[nn.Module, Dict]: SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' ) SCREAMING_SNAKE_CASE =model_func() # check if we have a head, if yes add it SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model'] SCREAMING_SNAKE_CASE =model_state_dict['trunk'] model.load_state_dict(lowerCAmelCase_ ) return model.eval(), model_state_dict["heads"] # pretrained SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', 
lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_a=620.83, w_m=2.52 ) ) ), ) # IN1K finetuned SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_a=620.83, w_m=2.52 ) ) ), ) if model_name: convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) _lowerCamelCase =parser.parse_args() _lowerCamelCase =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
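# --- Usage sketch (illustrative addition; the script file name is an assumption). ---
# Assuming the script above is saved as convert_regnet_to_pytorch.py, a single
# checkpoint can be converted with the CLI flags it declares:
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted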
334
1
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
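# --- Usage sketch (illustrative addition, not part of the original file). ---
# The repo id and file path below are placeholders.
#
#   from huggingface_hub import HfApi
#
#   repo_info = HfApi().dataset_info("user/my-dataset")
#   fs = HfFileSystem(repo_info=repo_info)
#   print(fs.ls(""))                      # top-level files and directories
#   with fs.open("data/train.csv") as f:  # hypothetical file in the repo
#       print(f.read(100))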
334
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCamelCase =16 _lowerCamelCase =32 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE =load_dataset('glue', 'mrpc' ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE =datasets.map( lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label', 'labels' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE =16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE =8 else: SCREAMING_SNAKE_CASE =None return tokenizer.pad( lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCamelCase =mocked_dataloaders # noqa: F811 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1": SCREAMING_SNAKE_CASE =2 # Initialize accelerator SCREAMING_SNAKE_CASE =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE =config['lr'] SCREAMING_SNAKE_CASE =int(config['num_epochs'] ) SCREAMING_SNAKE_CASE =int(config['seed'] ) SCREAMING_SNAKE_CASE =int(config['batch_size'] ) SCREAMING_SNAKE_CASE =evaluate.load('glue', 'mrpc' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE =model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE =AdamW(params=model.parameters(), lr=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ ) # Instantiate scheduler SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.prepare( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowerCAmelCase_, references=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:', lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.', ) parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowerCAmelCase_, lowerCAmelCase_ ) if __name__ == "__main__": main()
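# --- Minimal sketch of the decorator pattern used above (illustrative only). ---
# `find_executable_batch_size` re-runs the wrapped function with a halved
# batch size each time it raises an out-of-memory error, so the largest batch
# size that fits in memory is found automatically:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders and the model with `batch_size`, then train
#
#   train()  # called with no arguments; the decorator injects batch_size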
334
1
def prefix_function(input_string: str) -> list:
    """
    Compute the prefix function (KMP failure function) of ``input_string``:
    for each position, the length of the longest proper prefix that is also
    a suffix of the substring ending there.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the largest value of the prefix function, i.e. the longest border."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
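# --- Usage sketch (illustrative addition, not part of the original file). ---
# A classic application of the prefix function: Knuth-Morris-Pratt substring
# search over "pattern + separator + text".
def kmp_find(pattern: str, text: str) -> int:
    """Return the index of the first occurrence of ``pattern`` in ``text``, or -1."""
    if not pattern:
        return 0
    combined = pattern + "\x00" + text  # the separator must occur in neither string
    pi = prefix_function(combined)
    for i in range(len(pattern) + 1, len(combined)):
        if pi[i] == len(pattern):
            # a full match ends at index i of `combined`; map it back into `text`
            return i - 2 * len(pattern)
    return -1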
334
def reverse_long_words(sentence: str) -> str:
    """
    Reverse every word longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
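# --- Additional examples (illustrative addition, not part of the original file). ---
#   >>> reverse_long_words("sdrow yfilauq sretcarahc")
#   'words qualify characters'
#   >>> reverse_long_words("to be or not")  # no word longer than 4 characters
#   'to be or not'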
334
1
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over array[left:right]. Return -1 if target is absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
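# --- Usage sketch (illustrative addition, not part of the original file). ---
#   >>> collection = list(range(0, 100, 3))  # the input must be sorted
#   >>> ite_ternary_search(collection, 42)
#   14
#   >>> rec_ternary_search(0, len(collection) - 1, collection, 42)
#   14
#   >>> ite_ternary_search(collection, 43)   # absent values return -1
#   -1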
334
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json _lowerCamelCase ="sshleifer/mar_enro_6_3_student" class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Union[str, Any] ): super().setUp() SCREAMING_SNAKE_CASE =cached_path( 'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,) SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k' @slow @require_torch_gpu def _lowerCAmelCase ( self : Optional[int] ): MarianMTModel.from_pretrained(snake_case ) @slow @require_torch_gpu def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE ={ '$MAX_LEN': 64, '$BS': 64, '$GAS': 1, '$ENRO_DIR': self.data_dir, 'facebook/mbart-large-cc25': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '--learning_rate=3e-5': '--learning_rate 3e-4', '--num_train_epochs 6': '--num_train_epochs 1', } # Clean up bash script SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip() SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) ) SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split() # XXX: args.gpus > 1 : handle multi_gpu in the future SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args with patch.object(snake_case ,'argv' ,snake_case ): SCREAMING_SNAKE_CASE =argparse.ArgumentParser() SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case ) SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE =main(snake_case ) # Check metrics SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE =metrics['val'][0] SCREAMING_SNAKE_CASE =metrics['val'][-1] self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case ) self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['val_avg_bleu'] ,17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE =os.listdir(snake_case ) SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0] SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case ) SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' ) SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1 class a_ ( lowerCamelCase_ ): """simple docstring""" @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro' SCREAMING_SNAKE_CASE ={ '--fp16_opt_level=O1': '', '$MAX_LEN': 128, '$BS': 16, '$GAS': 1, '$ENRO_DIR': data_dir, '$m': 'sshleifer/student_marian_en_ro_6_1', 'val_check_interval=0.25': 'val_check_interval=1.0', } # Clean up bash script SCREAMING_SNAKE_CASE =( (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip() ) SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' ) SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) ) SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' ) SCREAMING_SNAKE_CASE =6 SCREAMING_SNAKE_CASE =( ['distillation.py'] + bash_script.split() + [ f'--output_dir={output_dir}', '--gpus=1', '--learning_rate=1e-3', f'--num_train_epochs={epochs}', '--warmup_steps=10', '--val_check_interval=1.0', '--do_predict', ] ) with patch.object(snake_case ,'argv' ,snake_case ): SCREAMING_SNAKE_CASE =argparse.ArgumentParser() SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case ) SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() ) SCREAMING_SNAKE_CASE =parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu SCREAMING_SNAKE_CASE =distill_main(snake_case ) # Check metrics SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE =metrics['val'][0] SCREAMING_SNAKE_CASE =metrics['val'][-1] assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE =os.listdir(snake_case ) SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0] SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case ) SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' ) SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1
334
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
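# --- Usage sketch (illustrative addition, not part of the original file). ---
# Every overridden value below is an arbitrary small example, not a released
# checkpoint configuration.
#
#   config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4)
#   print(config.hidden_size)          # 256, resolved through attribute_map
#   print(config.num_attention_heads)  # 16, aliased to encoder_attention_heads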
334
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
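# --- Usage sketch (illustrative addition, not part of the original file). ---
# Composing a Blip2Config from its three sub-configs via the classmethod above.
#
#   from transformers import Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
#   config = Blip2Config.from_vision_qformer_text_configs(
#       vision_config=Blip2VisionConfig(),
#       qformer_config=Blip2QFormerConfig(),
#       text_config=OPTConfig(),
#   )
#   print(config.num_query_tokens)  # 32 by default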
334
1
334
import faiss  # noqa: F401  # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401  # Here to have a nice missing dependency error message early on
import requests  # noqa: F401  # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401  # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401  # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).

This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references)  # doctest: +SKIP
    >>> print(out.mauve)  # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
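# Hedged usage sketch, not part of the original file: the docstring example
# above, spelled out as a script. mauve-text featurizes with gpt2-large by
# default, so the first call downloads that model; the texts are illustrative.
import datasets

mauve = datasets.load_metric("mauve")
out = mauve.compute(
    predictions=["hello there", "general kenobi"],
    references=["hello there", "general kenobi"],
)
print(out.mauve, out.frontier_integral)  # both lie in [0, 1]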
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
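# Hedged sketch, not part of the original file: what the lazy structure above
# buys. Importing the package is cheap; torch-backed classes are materialized
# only on first attribute access through _LazyModule. Assumes `transformers`
# (with torch) is installed.
import importlib

llama = importlib.import_module("transformers.models.llama")
config = llama.LlamaConfig()        # resolves configuration_llama only
model_cls = llama.LlamaForCausalLM  # first access here imports modeling_llama
print(config.model_type, model_cls.__name__)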
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
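# Hedged sketch, not part of the original file: instantiating the config above
# with a couple of non-default values; the numbers are illustrative.
config = ViTMAEConfig(hidden_size=192, num_hidden_layers=4, mask_ratio=0.6)
print(config.mask_ratio)                       # fraction of patches masked during pretraining
print(config.image_size // config.patch_size)  # 224 // 16 = 14 patches per side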
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class a_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = TextToVideoSDPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __UpperCAmelCase = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def _lowerCAmelCase ( self : Tuple ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE =DDIMScheduler( beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=snake_case ,set_alpha_to_one=snake_case ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE =CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,) SCREAMING_SNAKE_CASE =CLIPTextModel(snake_case ) SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE ={ 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def _lowerCAmelCase ( self : List[str] ,snake_case : Optional[Any] ,snake_case : int=0 ): if str(snake_case ).startswith('mps' ): SCREAMING_SNAKE_CASE =torch.manual_seed(snake_case ) else: SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(snake_case ) SCREAMING_SNAKE_CASE ={ 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE =self.get_dummy_components() SCREAMING_SNAKE_CASE =TextToVideoSDPipeline(**snake_case ) SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) SCREAMING_SNAKE_CASE =self.get_dummy_inputs(snake_case ) SCREAMING_SNAKE_CASE ='np' SCREAMING_SNAKE_CASE =sd_pipe(**snake_case ).frames SCREAMING_SNAKE_CASE =frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice 
).max() < 1e-2 def _lowerCAmelCase ( self : Tuple ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _lowerCAmelCase ( self : List[Any] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case ,expected_max_diff=1e-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def _lowerCAmelCase ( self : List[str] ): pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def _lowerCAmelCase ( self : str ): pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def _lowerCAmelCase ( self : Any ): pass def _lowerCAmelCase ( self : List[str] ): return super().test_progress_bar() @slow @skip_mps class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) SCREAMING_SNAKE_CASE =TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) SCREAMING_SNAKE_CASE =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE =pipe.to('cuda' ) SCREAMING_SNAKE_CASE ='Spiderman is surfing' SCREAMING_SNAKE_CASE =torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE =pipe(snake_case ,generator=snake_case ,num_inference_steps=25 ,output_type='pt' ).frames SCREAMING_SNAKE_CASE =video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) SCREAMING_SNAKE_CASE =TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) SCREAMING_SNAKE_CASE =pipe.to('cuda' ) SCREAMING_SNAKE_CASE ='Spiderman is surfing' SCREAMING_SNAKE_CASE =torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE =pipe(snake_case ,generator=snake_case ,num_inference_steps=2 ,output_type='pt' ).frames SCREAMING_SNAKE_CASE =video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
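# Hedged sketch, not part of the original file: the same call pattern the slow
# test above uses, at minimal step count. Assumes diffusers is installed and a
# CUDA device is available; output_type="pt" returns a frames tensor.
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe("Spiderman is surfing", generator=generator, num_inference_steps=2, output_type="pt").frames
print(frames.shape)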
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : Any ): with self.assertRaises(snake_case ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() ) def _lowerCAmelCase ( self : Union[str, Any] ): with self.assertRaises(snake_case ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) ) def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : int ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) ) self.assertEqual(arr.type ,pa.string() ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) ) def _lowerCAmelCase ( self : Dict ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,pa.string() ) @require_pil def _lowerCAmelCase ( self : int ): import PIL.Image SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) ) with patch( 'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects: SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1] self.assertIn('optimize_list_casting' ,snake_case ) self.assertFalse(kwargs['optimize_list_casting'] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_, pa.Buffer ) else pa.memory_map(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 
2]} del pa_table @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} ) with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer: writer.write({'labels': 0} ) writer.write({'labels': 1} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =f.read_all() SCREAMING_SNAKE_CASE =pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(lowerCAmelCase_ ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: with pytest.raises(lowerCAmelCase_ ): writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() @pytest.mark.parametrize('writer_batch_size', [None, 2, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: with pytest.raises(lowerCAmelCase_ ): writer.write({'col_1': 'foo', 'col_2': 1}, key=10 ) writer.write({'col_1': 'bar', 'col_2': 2}, key=10 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() @pytest.mark.parametrize('writer_batch_size', [None, 2, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: writer.write({'col_1': 'foo', 'col_2': 1}, key=1 ) writer.write({'col_1': 'bar', 'col_2': 2}, key=2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) 
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) writer.write_batch({'col_1': [], 'col_2': []} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) ) writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def snake_case__ ( ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' ) with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer: writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(lowerCAmelCase_, 
metadata=writer._schema.metadata ) _check_output(lowerCAmelCase_, 1 ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" if pa.types.is_list(lowerCAmelCase_ ): return get_base_dtype(arr_type.value_type ) else: return arr_type def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if isinstance(lst[0], lowerCAmelCase_ ): change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE =value @pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] ) @pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( 'col, expected_dtype', [ ('attention_mask', pa.inta()), ('special_tokens_mask', pa.inta()), ('token_type_ids', pa.inta()), ('input_ids', pa.intaa()), ('other', pa.intaa()), ], ) @pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize('raise_exception', [False, True] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' ) try: with ArrowWriter(path=lowerCAmelCase_ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow' with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs, type(lowerCAmelCase_ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(lowerCAmelCase_ ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ParquetWriter(stream=lowerCAmelCase_ ) as writer: writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize('embed_local_files', [False, True] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" import PIL.Image SCREAMING_SNAKE_CASE =str(tmp_path / 
'test_image_rgb.jpg' ) PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' ) SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ParquetWriter( stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer: writer.write({'image': image_path} ) writer.finalize() SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa_table.to_pydict() if embed_local_files: assert isinstance(out['image'][0]['path'], lowerCAmelCase_ ) with open(lowerCAmelCase_, 'rb' ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] ) SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter(stream=lowerCAmelCase_ ) as writer: writer._build_writer(inferred_schema=lowerCAmelCase_ ) assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
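# Hedged sketch, not part of the original file: the minimal happy path the
# tests above exercise repeatedly -- write two rows to an in-memory stream,
# finalize, and read the table back.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
print(num_examples, table.to_pydict())  # 2 {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}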
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowerCamelCase =logging.get_logger(__name__) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = ['input_features', 'attention_mask'] def __init__( self : Dict ,snake_case : Tuple=80 ,snake_case : Optional[Any]=16000 ,snake_case : List[str]=0.0 ,snake_case : Tuple=10 ,snake_case : List[Any]=25 ,snake_case : int="hamming_window" ,snake_case : Optional[Any]=32_768.0 ,snake_case : str=0.97 ,snake_case : List[str]=1.0 ,snake_case : Union[str, Any]=True ,snake_case : List[Any]=True ,snake_case : int=False ,**snake_case : Union[str, Any] ,): super().__init__(feature_size=snake_case ,sampling_rate=snake_case ,padding_value=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =feature_size SCREAMING_SNAKE_CASE =sampling_rate SCREAMING_SNAKE_CASE =padding_value SCREAMING_SNAKE_CASE =hop_length SCREAMING_SNAKE_CASE =win_length SCREAMING_SNAKE_CASE =frame_signal_scale SCREAMING_SNAKE_CASE =preemphasis_coeff SCREAMING_SNAKE_CASE =mel_floor SCREAMING_SNAKE_CASE =normalize_means SCREAMING_SNAKE_CASE =normalize_vars SCREAMING_SNAKE_CASE =win_function SCREAMING_SNAKE_CASE =return_attention_mask SCREAMING_SNAKE_CASE =win_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE =hop_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE =optimal_fft_length(self.sample_size ) SCREAMING_SNAKE_CASE =(self.n_fft // 2) + 1 def _lowerCAmelCase ( self : int ,snake_case : np.array ): if self.win_function == "hamming_window": SCREAMING_SNAKE_CASE =window_function(window_length=self.sample_size ,name=self.win_function ,periodic=snake_case ) else: SCREAMING_SNAKE_CASE =window_function(window_length=self.sample_size ,name=self.win_function ) SCREAMING_SNAKE_CASE =mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) SCREAMING_SNAKE_CASE =spectrogram( one_waveform * self.frame_signal_scale ,window=snake_case ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=snake_case ,preemphasis=self.preemphasis_coeff ,mel_filters=snake_case ,mel_floor=self.mel_floor ,log_mel='log' ,) return msfc_features.T def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : int ,snake_case : List[Any] ): # make sure we normalize float32 arrays if self.normalize_means: SCREAMING_SNAKE_CASE =x[:input_length].mean(axis=0 ) SCREAMING_SNAKE_CASE =np.subtract(snake_case ,snake_case ) if self.normalize_vars: SCREAMING_SNAKE_CASE =x[:input_length].std(axis=0 ) SCREAMING_SNAKE_CASE =np.divide(snake_case ,snake_case ) if input_length < x.shape[0]: SCREAMING_SNAKE_CASE =padding_value # make sure array is in float32 SCREAMING_SNAKE_CASE =x.astype(np.floataa ) return x def _lowerCAmelCase ( self : Dict ,snake_case : List[np.ndarray] ,snake_case : Optional[np.ndarray] = None ): SCREAMING_SNAKE_CASE =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case ,snake_case ,self.padding_value ) for x, n in zip(snake_case ,snake_case )] def __call__( self : Optional[Any] ,snake_case : Union[np.ndarray, List[float], 
List[np.ndarray], List[List[float]]] ,snake_case : Union[bool, str, PaddingStrategy] = False ,snake_case : Optional[int] = None ,snake_case : bool = False ,snake_case : Optional[int] = None ,snake_case : Optional[bool] = None ,snake_case : Optional[Union[str, TensorType]] = None ,snake_case : Optional[int] = None ,**snake_case : str ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) SCREAMING_SNAKE_CASE =isinstance(snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) SCREAMING_SNAKE_CASE =is_batched_numpy or ( isinstance(snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE =[np.asarray(snake_case ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case ,np.ndarray ): SCREAMING_SNAKE_CASE =np.asarray(snake_case ,dtype=np.floataa ) elif isinstance(snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE =raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE =[raw_speech] # extract fbank features SCREAMING_SNAKE_CASE =[self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech] # convert into correct format for padding SCREAMING_SNAKE_CASE =BatchFeature({'input_features': features} ) SCREAMING_SNAKE_CASE =self.pad( snake_case ,padding=snake_case ,max_length=snake_case ,truncation=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,**snake_case ,) # make sure list is in array format SCREAMING_SNAKE_CASE =padded_inputs.get('input_features' ) if isinstance(input_features[0] ,snake_case ): SCREAMING_SNAKE_CASE =[np.asarray(snake_case ,dtype=np.floataa ) for feature in input_features] SCREAMING_SNAKE_CASE =padded_inputs.get('attention_mask' ) if attention_mask is not None: SCREAMING_SNAKE_CASE =[np.asarray(snake_case ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: SCREAMING_SNAKE_CASE =( np.array(snake_case ,dtype=np.intaa ) if self._get_padding_strategies(snake_case ,max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) SCREAMING_SNAKE_CASE =self.normalize( padded_inputs['input_features'] ,attention_mask=snake_case ) if return_tensors is not None: SCREAMING_SNAKE_CASE =padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
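# Hedged sketch, not part of the original file: how a feature extractor with
# the __call__ contract above is typically driven. `MyFeatureExtractor` is a
# hypothetical placeholder for the class defined above (whose obfuscated name
# is kept in the source); one second of synthetic 16 kHz mono audio, all
# constructor defaults elsewhere.
import numpy as np

extractor = MyFeatureExtractor()  # hypothetical name for the class above
waveform = np.random.randn(16000).astype(np.float32)
features = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
print(features["input_features"].shape)  # (1, num_frames, feature_size)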
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if either input is 1 (logical OR), else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={"vocab_file": "vocab.txt"} _lowerCamelCase ={ "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } _lowerCamelCase ={ "facebook/esm2_t6_8M_UR50D": 10_24, "facebook/esm2_t12_35M_UR50D": 10_24, } def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" with open(lowerCAmelCase_, 'r' ) as f: SCREAMING_SNAKE_CASE =f.read().splitlines() return [l.strip() for l in lines] class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : int ,snake_case : Dict ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<cls>" ,snake_case : Optional[int]="<pad>" ,snake_case : int="<mask>" ,snake_case : Optional[int]="<eos>" ,**snake_case : List[str] ,): super().__init__(**snake_case ) SCREAMING_SNAKE_CASE =load_vocab_file(snake_case ) SCREAMING_SNAKE_CASE =dict(enumerate(self.all_tokens ) ) SCREAMING_SNAKE_CASE ={tok: ind for ind, tok in enumerate(self.all_tokens )} SCREAMING_SNAKE_CASE =unk_token SCREAMING_SNAKE_CASE =cls_token SCREAMING_SNAKE_CASE =pad_token SCREAMING_SNAKE_CASE =mask_token SCREAMING_SNAKE_CASE =eos_token SCREAMING_SNAKE_CASE =self.all_tokens self._create_trie(self.unique_no_split_tokens ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ): return self._id_to_token.get(snake_case ,self.unk_token ) def _lowerCAmelCase ( self : Dict ,snake_case : str ): return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) ) def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,**snake_case : Any ): return text.split() def _lowerCAmelCase ( self : Optional[int] ,snake_case : str=False ): return len(self._id_to_token ) def _lowerCAmelCase ( self : List[str] ): return {token: i for i, token in enumerate(self.all_tokens )} def _lowerCAmelCase ( self : List[Any] ,snake_case : str ): return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) ) def _lowerCAmelCase ( self : Any ,snake_case : int ): return self._id_to_token.get(snake_case ,self.unk_token ) def _lowerCAmelCase ( self : List[str] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE =[self.cls_token_id] SCREAMING_SNAKE_CASE =[self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def _lowerCAmelCase ( self : Optional[int] ,snake_case : List ,snake_case : Optional[List] = None ,snake_case : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] SCREAMING_SNAKE_CASE =[1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: mask += [0] * len(snake_case ) + [1] return mask def _lowerCAmelCase ( self : Optional[int] ,snake_case : Dict ,snake_case : Any ): SCREAMING_SNAKE_CASE =os.path.join(snake_case ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' ) with open(snake_case ,'w' ) as f: f.write('\n'.join(self.all_tokens ) ) return (vocab_file,) @property def _lowerCAmelCase ( self : int ): return self.get_vocab_size(with_added_tokens=snake_case ) def _lowerCAmelCase ( self : str ,snake_case : Union[List[str], List[AddedToken]] ,snake_case : bool = False ): return super()._add_tokens(snake_case ,special_tokens=snake_case )
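# Hedged sketch, not part of the original file: the file above mirrors the ESM
# tokenizer shipped with transformers (see the esm2 vocab URLs it names), so
# the same hub checkpoint can exercise the logic. The protein fragment is
# arbitrary; every vocabulary token is a no-split token, so contiguous
# residues are split per letter via the trie built in __init__.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
enc = tok("MKTAYIAKQR")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # ['<cls>', 'M', 'K', ..., '<eos>']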
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: _lowerCamelCase =None _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowerCamelCase ={ "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } _lowerCamelCase ={ "facebook/mbart-large-en-ro": 10_24, "facebook/mbart-large-cc25": 10_24, } # fmt: off _lowerCamelCase =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = ['input_ids', 'attention_mask'] __UpperCAmelCase = MBartTokenizer __UpperCAmelCase = [] __UpperCAmelCase = [] def __init__( self : str ,snake_case : Any=None ,snake_case : Dict=None ,snake_case : Tuple="<s>" ,snake_case : int="</s>" ,snake_case : Optional[Any]="</s>" ,snake_case : Optional[int]="<s>" ,snake_case : Optional[Any]="<unk>" ,snake_case : Dict="<pad>" ,snake_case : Union[str, Any]="<mask>" ,snake_case : List[Any]=None ,snake_case : Dict=None ,snake_case : int=None ,**snake_case : Dict ,): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE =AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else mask_token super().__init__( vocab_file=snake_case ,tokenizer_file=snake_case ,bos_token=snake_case ,eos_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,src_lang=snake_case ,tgt_lang=snake_case ,additional_special_tokens=snake_case ,**snake_case ,) SCREAMING_SNAKE_CASE =vocab_file SCREAMING_SNAKE_CASE =False if not self.vocab_file else True SCREAMING_SNAKE_CASE =FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) SCREAMING_SNAKE_CASE ={ lang_code: self.convert_tokens_to_ids(snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE =src_lang if src_lang is not None else 'en_XX' SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(self._src_lang ) SCREAMING_SNAKE_CASE =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _lowerCAmelCase ( self : List[Any] ): return self._src_lang @src_lang.setter def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : str ): SCREAMING_SNAKE_CASE =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowerCAmelCase ( self : Dict ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCAmelCase ( self : str ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE =[self.sep_token_id] SCREAMING_SNAKE_CASE =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Dict ,snake_case : str ,snake_case : Optional[str] ,snake_case : Optional[str] ,**snake_case : Optional[int] ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) SCREAMING_SNAKE_CASE =src_lang SCREAMING_SNAKE_CASE =self(snake_case ,add_special_tokens=snake_case ,return_tensors=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(snake_case ) SCREAMING_SNAKE_CASE =tgt_lang_id return inputs def _lowerCAmelCase ( self : Optional[int] ,snake_case : List[str] ,snake_case : str = "en_XX" ,snake_case : Optional[List[str]] = None ,snake_case : str = "ro_RO" ,**snake_case : str ,): SCREAMING_SNAKE_CASE =src_lang SCREAMING_SNAKE_CASE =tgt_lang return super().prepare_seqaseq_batch(snake_case ,snake_case ,**snake_case ) def _lowerCAmelCase ( self : Any ): return self.set_src_lang_special_tokens(self.src_lang ) def _lowerCAmelCase ( self : str ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ): SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(snake_case ) SCREAMING_SNAKE_CASE =[] SCREAMING_SNAKE_CASE =[self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE =processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def _lowerCAmelCase ( self : str ,snake_case : str ): SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(snake_case ) SCREAMING_SNAKE_CASE =[] SCREAMING_SNAKE_CASE =[self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE =processors.TemplateProcessing( single=prefix_tokens_str + 
['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def _lowerCAmelCase ( self : str ,snake_case : str ,snake_case : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return SCREAMING_SNAKE_CASE =os.path.join( snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ): copyfile(self.vocab_file ,snake_case ) return (out_vocab_file,)
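# Hedged sketch, not part of the original file: the src/tgt language plumbing
# above in action, using a checkpoint named in this file's vocab map. mBART
# appends [eos, lang_code] as suffix tokens, per set_src_lang_special_tokens.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
enc = tok("UN Chief Says There Is No Military Solution in Syria")
print(tok.convert_ids_to_tokens(enc["input_ids"])[-2:])  # ['</s>', 'en_XX']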
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger() @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def _lowerCAmelCase ( self : Any ,snake_case : Any ,snake_case : Tensor ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self : int ,snake_case : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _lowerCAmelCase ( self : Tuple ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def __call__( self : int ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( f'Numbers of operations are different. Source module has {len(snake_case )} operations while' f' destination module has {len(snake_case )}.' ) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ): """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): SCREAMING_SNAKE_CASE =timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ResNetForImageClassification(lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) ) module_transfer(lowerCAmelCase_ ) assert torch.allclose(from_model(lowerCAmelCase_ ), our_model(lowerCAmelCase_ ).logits ), "The model logits don't match the original one." 
SCREAMING_SNAKE_CASE =F'resnet{"-".join(name.split("resnet" ) )}' print(lowerCAmelCase_ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, ) # we can use the convnext one SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, ) print(F'Pushed {checkpoint_name}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE =1000 SCREAMING_SNAKE_CASE =(1, num_labels) SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), } if model_name: convert_weight_and_push(lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) _lowerCamelCase =parser.parse_args() _lowerCamelCase =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition `a[left_index:right_index]` around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)  # pick a random pivot index
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
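# Hedged sketch, not part of the original file: sorting without the
# interactive prompt, showing the in-place contract of quick_sort_random.
data = [5, 3, 8, 1, 9, 2]
quick_sort_random(data, 0, len(data))
print(data)  # [1, 2, 3, 5, 8, 9]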
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` is the keyword name as defined by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts and return their files' contents keyed by artifact name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
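# Hedged sketch, not part of the original file: pulling two artifacts from the
# last scheduled run. The artifact names are illustrative, and the token is
# read from the environment.
import os

reports = get_last_daily_ci_reports(
    artifact_names=["ci_results", "test_failure_tables"],  # hypothetical names
    output_dir="ci_artifacts",
    token=os.environ.get("GITHUB_TOKEN"),
)
print(sorted(reports))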
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing the operations both modules run on `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.'
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message='Add model',
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message='Add image processor',
            use_temp_dir=True,
        )

        print(f'Pushed {checkpoint_name}')


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'
        ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'
        ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'
        ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'
        ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'
        ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
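A launch sketch (the script filename is an assumption):

# python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./dump
# Note: `--push_to_hub` is declared with `type=bool`, so argparse treats any non-empty
# string (even "False") as truthy; to skip pushing, omit the flag and edit the default.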
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
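A run sketch (the test path is an assumption based on the transformers test layout):

# python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py -k "squeezebert" -v
# Setting RUN_SLOW=1 in the environment also enables the @slow checkpoint and integration tests.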
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Compute the pressure of an ideal gas from P*V = n*R*T."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Compute the volume of an ideal gas from P*V = n*R*T."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
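A worked example (illustrative): the ideal gas law P*V = n*R*T gives P = n*R*T / V.

# 2 mol at 100 K in a 5 m^3 vessel:
pressure_of_gas_system(2, 100, 5)  # -> 332.57848 (Pa)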
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = None __UpperCAmelCase = None @property def _lowerCAmelCase ( self : List[Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case ,'feature_size' ) ) self.assertTrue(hasattr(snake_case ,'sampling_rate' ) ) self.assertTrue(hasattr(snake_case ,'padding_value' ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _lowerCAmelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ): def _inputs_have_equal_length(snake_case : Dict ): SCREAMING_SNAKE_CASE =len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case : str ,snake_case : Dict ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case ,snake_case ): if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ): return False return True SCREAMING_SNAKE_CASE 
=self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size # test padding for List[int] + numpy SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='max_length' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - 
padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ): def _inputs_have_equal_length(snake_case : str ): SCREAMING_SNAKE_CASE =len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case ,snake_case ): if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ): return False return True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) # truncate to smallest SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to smallest with np SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to middle SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(len(input_a[-1] ) == 
len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE =12 SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of SCREAMING_SNAKE_CASE =len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) def _lowerCAmelCase ( self : Optional[int] ): self._check_padding(numpify=snake_case ) def _lowerCAmelCase ( self : Tuple ): self._check_padding(numpify=snake_case ) def _lowerCAmelCase ( self : List[str] ): self._check_truncation(numpify=snake_case ) def _lowerCAmelCase ( self : int ): self._check_truncation(numpify=snake_case ) @require_torch def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.feat_extract_dict SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case ) SCREAMING_SNAKE_CASE 
=self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs] SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' ) self.assertIn('attention_mask' ,snake_case ) self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.feat_extract_dict SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs] SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =min(snake_case ) SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' ) self.assertIn('attention_mask' ,snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
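A usage sketch for the padding/truncation mixin above (illustrative; all class names here are assumptions):

# Concrete feature-extraction suites inherit the mixin together with unittest.TestCase and
# supply the two attributes it reads, `feature_extraction_class` and `feat_extract_tester`:
#
#     class MyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#         feat_extract_tester = MyFeatureExtractionTester()  # exposes prepare_inputs_for_common(), seq_length_diff, ...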
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f'{dat:08b}'
                result += curr_byte
            return result
    except OSError:
        print('File not accessible')
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]

    lexicon[curr_string + '1'] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Add the source file's length (using Elias gamma coding) in front of the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it, and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
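A usage sketch (the script and file names are illustrative):

# python lempel_ziv.py input.bin output.lz
# argv[1] is the source file; argv[2] receives the compressed bit stream,
# prefixed with the source length in Elias gamma coding (see add_file_length above).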
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward the target; return (generation, total population, best string)."""
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}'
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
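A quick check of the fitness function (illustrative):

evaluate('abcd', 'abxd')  # -> ('abcd', 3.0): three characters match the target position-wise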
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
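A worked example (illustrative) of the 90-degree rotation:

# rotate_90 composes transpose then reverse_row:
#   [[1, 2],     transpose     [[1, 3],     reverse_row     [[2, 4],
#    [3, 4]]   ----------->     [2, 4]]   ------------->     [1, 3]]
assert rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]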
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
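A minimal Trie usage sketch (behavior taken from the assertions above):

trie = Trie()
trie.add('[CLS]')
trie.split('[CLS] hello')  # -> ['[CLS]', ' hello']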
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowerCamelCase ={ "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if args.student_type == "roberta": SCREAMING_SNAKE_CASE =False elif args.student_type == "gpt2": SCREAMING_SNAKE_CASE =False def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if args.student_type == "roberta": SCREAMING_SNAKE_CASE =False def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Training' ) parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path', type=lowerCAmelCase_, required=lowerCAmelCase_, help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file', type=lowerCAmelCase_, required=lowerCAmelCase_, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.', ) parser.add_argument( '--student_type', type=lowerCAmelCase_, choices=['distilbert', 'roberta', 'gpt2'], required=lowerCAmelCase_, help='The student type (DistilBERT, RoBERTa).', ) parser.add_argument('--student_config', type=lowerCAmelCase_, required=lowerCAmelCase_, help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights', default=lowerCAmelCase_, type=lowerCAmelCase_, help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=lowerCAmelCase_, help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name', type=lowerCAmelCase_, required=lowerCAmelCase_, help='The teacher model.' ) parser.add_argument('--temperature', default=2.0, type=lowerCAmelCase_, help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce', default=0.5, type=lowerCAmelCase_, help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm', default=0.0, type=lowerCAmelCase_, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.', ) parser.add_argument('--alpha_clm', default=0.5, type=lowerCAmelCase_, help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse', default=0.0, type=lowerCAmelCase_, help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos', default=0.0, type=lowerCAmelCase_, help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop', default=0.15, type=lowerCAmelCase_, help='Proportion of tokens for which we need to make a prediction.', ) parser.add_argument('--word_mask', default=0.8, type=lowerCAmelCase_, help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep', default=0.1, type=lowerCAmelCase_, help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand', default=0.1, type=lowerCAmelCase_, help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing', default=0.7, type=lowerCAmelCase_, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).', ) parser.add_argument('--token_counts', type=lowerCAmelCase_, help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.', ) parser.add_argument( '--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.', ) parser.add_argument( '--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.', ) parser.add_argument('--n_epoch', type=lowerCAmelCase_, default=3, help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size', type=lowerCAmelCase_, default=5, help='Batch size (for each process).' ) parser.add_argument( '--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.', ) parser.add_argument( '--gradient_accumulation_steps', type=lowerCAmelCase_, default=50, help='Gradient accumulation for larger training batches.', ) parser.add_argument('--warmup_prop', default=0.05, type=lowerCAmelCase_, help='Linear warmup proportion.' ) parser.add_argument('--weight_decay', default=0.0, type=lowerCAmelCase_, help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate', default=5e-4, type=lowerCAmelCase_, help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon', default=1e-6, type=lowerCAmelCase_, help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm', default=5.0, type=lowerCAmelCase_, help='Max gradient norm.' ) parser.add_argument('--initializer_range', default=0.02, type=lowerCAmelCase_, help='Random initialization range.' 
) parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', ) parser.add_argument( '--fp16_opt_level', type=lowerCAmelCase_, default='O1', help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ), ) parser.add_argument('--n_gpu', type=lowerCAmelCase_, default=1, help='Number of GPUs in the node.' ) parser.add_argument('--local_rank', type=lowerCAmelCase_, default=-1, help='Distributed training - Local rank' ) parser.add_argument('--seed', type=lowerCAmelCase_, default=56, help='Random seed' ) parser.add_argument('--log_interval', type=lowerCAmelCase_, default=500, help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval', type=lowerCAmelCase_, default=4000, help='Checkpoint interval.' ) SCREAMING_SNAKE_CASE =parser.parse_args() sanity_checks(lowerCAmelCase_ ) # ARGS # init_gpu_params(lowerCAmelCase_ ) set_seed(lowerCAmelCase_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite' ' it. Use `--force` if you want to overwrite it.' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'Experiment will be dumped and logged in {args.dump_path}' ) # SAVE PARAMS # logger.info(F'Param: {args}' ) with open(os.path.join(args.dump_path, 'parameters.json' ), 'w' ) as f: json.dump(vars(lowerCAmelCase_ ), lowerCAmelCase_, indent=4 ) git_log(args.dump_path ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =MODEL_CLASSES[args.student_type] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =MODEL_CLASSES[args.teacher_type] # TOKENIZER # SCREAMING_SNAKE_CASE =teacher_tokenizer_class.from_pretrained(args.teacher_name ) SCREAMING_SNAKE_CASE ={} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): SCREAMING_SNAKE_CASE =tokenizer.all_special_tokens.index(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =tokenizer.all_special_ids[idx] logger.info(F'Special tokens {special_tok_ids}' ) SCREAMING_SNAKE_CASE =special_tok_ids SCREAMING_SNAKE_CASE =tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'Loading data from {args.data_file}' ) with open(args.data_file, 'rb' ) as fp: SCREAMING_SNAKE_CASE =pickle.load(lowerCAmelCase_ ) if args.mlm: logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' ) with open(args.token_counts, 'rb' ) as fp: SCREAMING_SNAKE_CASE =pickle.load(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =np.maximum(lowerCAmelCase_, 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): SCREAMING_SNAKE_CASE =0.0 # do not predict special tokens SCREAMING_SNAKE_CASE =torch.from_numpy(lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =LmSeqsDataset(params=lowerCAmelCase_, data=lowerCAmelCase_ ) logger.info('Data loader created.'
) # STUDENT # logger.info(F'Loading student config from {args.student_config}' ) SCREAMING_SNAKE_CASE =student_config_class.from_pretrained(args.student_config ) SCREAMING_SNAKE_CASE =True if args.student_pretrained_weights is not None: logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' ) SCREAMING_SNAKE_CASE =student_model_class.from_pretrained(args.student_pretrained_weights, config=lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE =student_model_class(lowerCAmelCase_ ) if args.n_gpu > 0: student.to(F'cuda:{args.local_rank}' ) logger.info('Student loaded.' ) # TEACHER # SCREAMING_SNAKE_CASE =teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=lowerCAmelCase_ ) if args.n_gpu > 0: teacher.to(F'cuda:{args.local_rank}' ) logger.info(F'Teacher loaded from {args.teacher_name}.' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(lowerCAmelCase_, lowerCAmelCase_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(lowerCAmelCase_, lowerCAmelCase_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() SCREAMING_SNAKE_CASE =Distiller( params=lowerCAmelCase_, dataset=lowerCAmelCase_, token_probs=lowerCAmelCase_, student=lowerCAmelCase_, teacher=lowerCAmelCase_ ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
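# Illustrative sketch (not part of the original script): how `--mlm_smoothing`
# turns raw token counts into masking scores, mirroring the
# `np.maximum(counts, 1) ** -args.mlm_smoothing` line above. Rarer tokens get
# proportionally higher scores, as in XLM / word2vec frequency smoothing. Note
# also that the defaults for --word_mask/--word_keep/--word_rand
# (0.8 / 0.1 / 0.1) sum to 1.0, so every selected token gets exactly one fate.
import numpy as np

counts = np.array([1_000_000, 10_000, 100, 1])  # hypothetical token counts
scores = np.maximum(counts, 1) ** -0.7  # 0.7 is the default --mlm_smoothing
print(scores / scores.sum())  # rare tokens dominate the masking distribution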
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase =[ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] _lowerCamelCase =[ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' ) return sd def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE =OrderedDict() SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE =key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0], name_pair[1] ) SCREAMING_SNAKE_CASE =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE =new_d['cls.predictions.bias'] return new_d @torch.no_grad() def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE ='pretraining' if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 1024} else: raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' 
) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512} SCREAMING_SNAKE_CASE ='multichoice' elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048} SCREAMING_SNAKE_CASE ='vqa_advanced' elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048, 'num_labels': 3129} SCREAMING_SNAKE_CASE ='vqa' elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE ={ 'visual_embedding_dim': 1024, 'num_labels': 2, } SCREAMING_SNAKE_CASE ='nlvr' SCREAMING_SNAKE_CASE =VisualBertConfig(**lowerCAmelCase_ ) # Load State Dict SCREAMING_SNAKE_CASE =load_state_dict(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =get_new_dict(lowerCAmelCase_, lowerCAmelCase_ ) if model_type == "pretraining": SCREAMING_SNAKE_CASE =VisualBertForPreTraining(lowerCAmelCase_ ) elif model_type == "vqa": SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(lowerCAmelCase_ ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(lowerCAmelCase_ ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) # Save Checkpoints Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") _lowerCamelCase =parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sorts ``collection`` in place, ascending, by recursive insertion sort."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubbles ``collection[index - 1]`` rightwards until it is in order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
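# Worked example (illustrative): this is an insertion sort expressed through
# two mutually recursive helpers, so it sorts in place in O(n^2) comparisons
# in the worst case and O(n) when the input is already sorted.
if __name__ == "__main__":
    example = [5, 2, 4, 1]
    rec_insertion_sort(example, len(example))
    assert example == [1, 2, 4, 5]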
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json", } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'nllb-moe' __UpperCAmelCase = ['past_key_values'] __UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,): SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =d_model SCREAMING_SNAKE_CASE =encoder_ffn_dim SCREAMING_SNAKE_CASE =encoder_layers SCREAMING_SNAKE_CASE =encoder_attention_heads SCREAMING_SNAKE_CASE =decoder_ffn_dim SCREAMING_SNAKE_CASE =decoder_layers SCREAMING_SNAKE_CASE =decoder_attention_heads SCREAMING_SNAKE_CASE =dropout SCREAMING_SNAKE_CASE =attention_dropout SCREAMING_SNAKE_CASE =activation_dropout SCREAMING_SNAKE_CASE =activation_function SCREAMING_SNAKE_CASE =init_std SCREAMING_SNAKE_CASE =encoder_layerdrop SCREAMING_SNAKE_CASE =decoder_layerdrop SCREAMING_SNAKE_CASE =use_cache SCREAMING_SNAKE_CASE =encoder_layers SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE =router_z_loss_coef SCREAMING_SNAKE_CASE =router_aux_loss_coef SCREAMING_SNAKE_CASE =decoder_sparse_step SCREAMING_SNAKE_CASE =encoder_sparse_step SCREAMING_SNAKE_CASE =num_experts SCREAMING_SNAKE_CASE =expert_capacity SCREAMING_SNAKE_CASE =router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' ) SCREAMING_SNAKE_CASE =router_dtype SCREAMING_SNAKE_CASE =router_ignore_padding_tokens SCREAMING_SNAKE_CASE =batch_prioritized_routing SCREAMING_SNAKE_CASE =second_expert_policy SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction SCREAMING_SNAKE_CASE =moe_token_dropout SCREAMING_SNAKE_CASE =output_router_logits super().__init__( pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
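# Usage sketch (assumption: the class above mirrors transformers'
# NllbMoeConfig, so the import below needs a transformers release that ships
# NLLB-MoE; the tiny hyperparameters are illustrative, not the 54B defaults).
from transformers import NllbMoeConfig

tiny = NllbMoeConfig(d_model=128, encoder_layers=2, decoder_layers=2, num_experts=4)
print(tiny.num_attention_heads)  # attribute_map resolves this to encoder_attention_heads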
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPTBigCode, a GPT-2 variant with multi-query attention."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
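# Usage sketch: the attribute_map above lets generic Transformers names
# resolve to the GPT-style attributes actually stored on the config.
if __name__ == "__main__":
    cfg = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8)
    assert cfg.hidden_size == 256  # alias for n_embd
    assert cfg.num_hidden_layers == 4  # alias for n_layer
    print(cfg.multi_query)  # True: keys/values are shared across all heads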
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
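# Quick checks (illustrative): sorting the concatenation costs
# O((m + n) log(m + n)); a binary-search partition could reach
# O(log(min(m, n))), but this version favours clarity over asymptotics.
if __name__ == "__main__":
    assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
    assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5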
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger(__name__) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: SCREAMING_SNAKE_CASE =192 SCREAMING_SNAKE_CASE =768 SCREAMING_SNAKE_CASE =12 SCREAMING_SNAKE_CASE =3 SCREAMING_SNAKE_CASE =[800, 1333] SCREAMING_SNAKE_CASE =False elif yolos_name == "yolos_s_dWr": SCREAMING_SNAKE_CASE =330 SCREAMING_SNAKE_CASE =14 SCREAMING_SNAKE_CASE =6 SCREAMING_SNAKE_CASE =1320 elif "yolos_s" in yolos_name: SCREAMING_SNAKE_CASE =384 SCREAMING_SNAKE_CASE =1536 SCREAMING_SNAKE_CASE =12 SCREAMING_SNAKE_CASE =6 elif "yolos_b" in yolos_name: SCREAMING_SNAKE_CASE =[800, 1344] SCREAMING_SNAKE_CASE =91 SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE ='coco-detection-id2label.json' SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} return config def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE =state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) SCREAMING_SNAKE_CASE =state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE =in_proj_weight[: config.hidden_size, :] SCREAMING_SNAKE_CASE =in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE =in_proj_weight[-config.hidden_size :, :] SCREAMING_SNAKE_CASE =in_proj_bias[-config.hidden_size :] def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" if "backbone" in name: SCREAMING_SNAKE_CASE =name.replace('backbone', 'vit' ) if "cls_token" in name: SCREAMING_SNAKE_CASE =name.replace('cls_token', 'embeddings.cls_token' ) if "det_token" in name: SCREAMING_SNAKE_CASE =name.replace('det_token', 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: SCREAMING_SNAKE_CASE =name.replace('mid_pos_embed', 'encoder.mid_position_embeddings' ) if "pos_embed" in name: SCREAMING_SNAKE_CASE =name.replace('pos_embed', 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE =name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "blocks" in name: SCREAMING_SNAKE_CASE =name.replace('blocks', 'encoder.layer' ) if "attn.proj" in name: SCREAMING_SNAKE_CASE =name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: SCREAMING_SNAKE_CASE =name.replace('attn', 'attention.self' ) if "norm1" in name: SCREAMING_SNAKE_CASE =name.replace('norm1', 'layernorm_before' ) if "norm2" in name: SCREAMING_SNAKE_CASE =name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE =name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE =name.replace('mlp.fc2', 
'output.dense' ) if "class_embed" in name: SCREAMING_SNAKE_CASE =name.replace('class_embed', 'class_labels_classifier' ) if "bbox_embed" in name: SCREAMING_SNAKE_CASE =name.replace('bbox_embed', 'bbox_predictor' ) if "vit.norm" in name: SCREAMING_SNAKE_CASE =name.replace('vit.norm', 'vit.layernorm' ) return name def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE =orig_state_dict.pop(lowerCAmelCase_ ) if "qkv" in key: SCREAMING_SNAKE_CASE =key.split('.' ) SCREAMING_SNAKE_CASE =int(key_split[2] ) SCREAMING_SNAKE_CASE =model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE =val[:dim, :] SCREAMING_SNAKE_CASE =val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE =val[-dim:, :] else: SCREAMING_SNAKE_CASE =val[:dim] SCREAMING_SNAKE_CASE =val[dim : dim * 2] SCREAMING_SNAKE_CASE =val[-dim:] else: SCREAMING_SNAKE_CASE =val return orig_state_dict def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE ='http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE =Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = False ): """simple docstring""" SCREAMING_SNAKE_CASE =get_yolos_config(lowerCAmelCase_ ) # load original state_dict SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' )['model'] # load 🤗 model SCREAMING_SNAKE_CASE =YolosForObjectDetection(lowerCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE =convert_state_dict(lowerCAmelCase_, lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) # Check outputs on an image, prepared by YolosImageProcessor SCREAMING_SNAKE_CASE =800 if yolos_name != 'yolos_ti' else 512 SCREAMING_SNAKE_CASE =YolosImageProcessor(format='coco_detection', size=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =image_processor(images=prepare_img(), return_tensors='pt' ) SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =outputs.logits, outputs.pred_boxes SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =None, None if yolos_name == "yolos_ti": SCREAMING_SNAKE_CASE =torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) SCREAMING_SNAKE_CASE =torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": SCREAMING_SNAKE_CASE =torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) SCREAMING_SNAKE_CASE =torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": SCREAMING_SNAKE_CASE =torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) SCREAMING_SNAKE_CASE =torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": SCREAMING_SNAKE_CASE =torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) SCREAMING_SNAKE_CASE =torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": SCREAMING_SNAKE_CASE =torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) SCREAMING_SNAKE_CASE =torch.tensor( 
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F'Unknown yolos_name: {yolos_name}' ) assert torch.allclose(logits[0, :3, :3], lowerCAmelCase_, atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3], lowerCAmelCase_, atol=1e-4 ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: SCREAMING_SNAKE_CASE ={ 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' ) SCREAMING_SNAKE_CASE =model_mapping[yolos_name] image_processor.push_to_hub(lowerCAmelCase_, organization='hustvl' ) model.push_to_hub(lowerCAmelCase_, organization='hustvl' ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase =parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
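# Minimal sketch (toy tensor, not a real checkpoint) of the in-proj split used
# above: timm stacks the query, key and value projections along dim 0, and the
# converter slices them back apart before writing separate q/k/v weights.
import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)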
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'transfo-xl' __UpperCAmelCase = ['mems'] __UpperCAmelCase = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] ,snake_case : List[Any]=267735 ,snake_case : Optional[int]=[20000, 40000, 200000] ,snake_case : int=1024 ,snake_case : Optional[Any]=1024 ,snake_case : Tuple=16 ,snake_case : int=64 ,snake_case : Union[str, Any]=4096 ,snake_case : List[str]=4 ,snake_case : int=False ,snake_case : int=18 ,snake_case : Tuple=1600 ,snake_case : List[str]=1000 ,snake_case : Optional[Any]=True ,snake_case : List[str]=True ,snake_case : Optional[Any]=0 ,snake_case : Optional[Any]=-1 ,snake_case : List[Any]=True ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.0 ,snake_case : int=True ,snake_case : Any="normal" ,snake_case : int=0.01 ,snake_case : int=0.01 ,snake_case : str=0.02 ,snake_case : Any=1e-5 ,snake_case : Optional[int]=0 ,**snake_case : List[Any] ,): SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =[] self.cutoffs.extend(snake_case ) if proj_share_all_but_first: SCREAMING_SNAKE_CASE =[False] + [True] * len(self.cutoffs ) else: SCREAMING_SNAKE_CASE =[False] + [False] * len(self.cutoffs ) SCREAMING_SNAKE_CASE =d_model SCREAMING_SNAKE_CASE =d_embed SCREAMING_SNAKE_CASE =d_head SCREAMING_SNAKE_CASE =d_inner SCREAMING_SNAKE_CASE =div_val SCREAMING_SNAKE_CASE =pre_lnorm SCREAMING_SNAKE_CASE =n_layer SCREAMING_SNAKE_CASE =n_head SCREAMING_SNAKE_CASE =mem_len SCREAMING_SNAKE_CASE =same_length SCREAMING_SNAKE_CASE =attn_type SCREAMING_SNAKE_CASE =clamp_len SCREAMING_SNAKE_CASE =sample_softmax SCREAMING_SNAKE_CASE =adaptive SCREAMING_SNAKE_CASE =dropout SCREAMING_SNAKE_CASE =dropatt SCREAMING_SNAKE_CASE =untie_r SCREAMING_SNAKE_CASE =init SCREAMING_SNAKE_CASE =init_range SCREAMING_SNAKE_CASE =proj_init_std SCREAMING_SNAKE_CASE =init_std SCREAMING_SNAKE_CASE =layer_norm_epsilon super().__init__(eos_token_id=snake_case ,**snake_case ) @property def _lowerCAmelCase ( self : str ): # Message copied from Transformer-XL documentation logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' ) return -1 @max_position_embeddings.setter def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Dict ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
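# Sketch of what the `cutoffs` above encode: adaptive softmax splits the
# 267,735-token vocabulary into frequency bands, so most of the softmax work
# happens in the small head cluster of frequent tokens.
cutoffs = [20000, 40000, 200000]
vocab_size = 267735
bands = list(zip([0] + cutoffs, cutoffs + [vocab_size]))
print(bands)  # [(0, 20000), (20000, 40000), (40000, 200000), (200000, 267735)]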
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit quantum Fourier transform circuit and simulate it."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}"
    )
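# Sanity sketch: an n-qubit QFT built as above uses n Hadamard gates,
# n * (n - 1) / 2 controlled-phase gates and n // 2 final swaps.
if __name__ == "__main__":
    for n in (1, 2, 3, 4):
        print(n, "qubits:", n, "H,", n * (n - 1) // 2, "CP,", n // 2, "swap(s)")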
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class a_ : """simple docstring""" def __init__( self : Optional[int] ,snake_case : Any ,snake_case : Dict=100 ,snake_case : List[Any]=13 ,snake_case : str=30 ,snake_case : List[str]=2 ,snake_case : List[Any]=3 ,snake_case : Tuple=True ,snake_case : Optional[Any]=True ,snake_case : int=32 ,snake_case : Tuple=4 ,snake_case : List[Any]=4 ,snake_case : Optional[Any]=37 ,snake_case : Optional[Any]="gelu" ,snake_case : Tuple=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : List[Any]=10 ,snake_case : Tuple=0.02 ,snake_case : List[str]=3 ,snake_case : Any=None ,snake_case : int=[0, 1, 2, 3] ,): SCREAMING_SNAKE_CASE =parent SCREAMING_SNAKE_CASE =100 SCREAMING_SNAKE_CASE =batch_size SCREAMING_SNAKE_CASE =image_size SCREAMING_SNAKE_CASE =patch_size SCREAMING_SNAKE_CASE =num_channels SCREAMING_SNAKE_CASE =is_training SCREAMING_SNAKE_CASE =use_labels SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =type_sequence_label_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =scope SCREAMING_SNAKE_CASE =out_indices SCREAMING_SNAKE_CASE =num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE =num_patches + 1 def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None if self.use_labels: SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) SCREAMING_SNAKE_CASE =self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self : Dict ): return BeitConfig( vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,) def 
_lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ): SCREAMING_SNAKE_CASE =BeitModel(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ): SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ): SCREAMING_SNAKE_CASE =self.type_sequence_label_size SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE =1 SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ): SCREAMING_SNAKE_CASE =self.num_labels SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __UpperCAmelCase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =BeitModelTester(self ) SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 ) def _lowerCAmelCase ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def _lowerCAmelCase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def 
_lowerCAmelCase ( self : Union[str, Any] ): pass def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) SCREAMING_SNAKE_CASE =model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) ) def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(snake_case ) SCREAMING_SNAKE_CASE =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE =['pixel_values'] self.assertListEqual(arg_names[:1] ,snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case ) def _lowerCAmelCase ( self : Any ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]: continue SCREAMING_SNAKE_CASE =model_class(snake_case ) model.to(snake_case ) model.train() SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case ) SCREAMING_SNAKE_CASE =model(**snake_case ).loss loss.backward() def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE =False SCREAMING_SNAKE_CASE =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue SCREAMING_SNAKE_CASE =model_class(snake_case ) model.gradient_checkpointing_enable() model.to(snake_case ) model.train() SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case ) SCREAMING_SNAKE_CASE =model(**snake_case ).loss loss.backward() def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE =_config_zero_init(snake_case ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(config=snake_case ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if 
param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,) @slow def _lowerCAmelCase ( self : List[str] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Tuple ): return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case ) # prepare bool_masked_pos SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) ) @slow def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 1000) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case ) self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) ) SCREAMING_SNAKE_CASE =281 self.assertEqual(logits.argmax(-1 ).item() ,snake_case ) @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( snake_case ) SCREAMING_SNAKE_CASE =self.default_image_processor SCREAMING_SNAKE_CASE =prepare_img() SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 21841) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case ) self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) ) SCREAMING_SNAKE_CASE =2396 self.assertEqual(logits.argmax(-1 ).item() ,snake_case ) @slow def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) 
SCREAMING_SNAKE_CASE =model.to(snake_case ) SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case ) SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' ) SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits # verify the logits SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape ,snake_case ) SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: SCREAMING_SNAKE_CASE =torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] ,device=snake_case ,) else: SCREAMING_SNAKE_CASE =torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] ,device=snake_case ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) ) @slow def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) SCREAMING_SNAKE_CASE =model.to(snake_case ) SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case ) SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' ) SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] ) SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**snake_case ) SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] ) SCREAMING_SNAKE_CASE =torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape ,snake_case ) SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ) SCREAMING_SNAKE_CASE =torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape ,snake_case )
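# Worked example of the sequence-length arithmetic the tester above relies on:
# a 30x30 image with 2x2 patches gives (30 // 2) ** 2 = 225 patches, plus one
# [CLS] token, for an expected sequence length of 226.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
print(num_patches + 1)  # 226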
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
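# Minimal sketch (illustrative; far simpler than transformers' _LazyModule) of
# the lazy-import idea above: a submodule is only imported the first time one
# of its exported names is accessed, so `import ...upernet` stays cheap.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, exported in self._import_structure.items():
            if attr in exported:
                module = importlib.import_module(f".{module_name}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)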
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger() @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def _lowerCAmelCase ( self : List[Any] ,snake_case : Dict ,snake_case : Tensor ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self : List[str] ,snake_case : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _lowerCAmelCase ( self : Optional[Any] ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 1 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = True def __call__( self : str ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch: raise Exception( f'Numbers of operations are different. Source module has {len(snake_case )} operations while' f' destination module has {len(snake_case )}.' 
) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) class a_ ( nn.Module ): """simple docstring""" def __init__( self : Any ,snake_case : nn.Module ): super().__init__() SCREAMING_SNAKE_CASE =[] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), f'Unexpected layer name {k}' SCREAMING_SNAKE_CASE =len(snake_case ) + 1 feature_blocks.append((f'res{block_index}', v) ) SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case ) def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ): return get_trunk_forward_outputs( snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,) class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ): SCREAMING_SNAKE_CASE =x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Optional[Any] ,snake_case : str ): # default to timm! if x not in self: SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case ) SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) ) else: SCREAMING_SNAKE_CASE =super().__getitem__(snake_case ) return val class a_ ( lowerCamelCase_ ): """simple docstring""" def __getitem__( self : int ,snake_case : str ): if "seer" in x and "in1k" not in x: SCREAMING_SNAKE_CASE =RegNetModel else: SCREAMING_SNAKE_CASE =RegNetForImageClassification return val def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" for from_key, to_key in keys: SCREAMING_SNAKE_CASE =from_state_dict[from_key].clone() print(F'Copied key={from_key} to={to_key}' ) return to_state_dict def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True, ): """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func() SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) ) module_transfer(lowerCAmelCase_ ) if from_state_dict is not None: SCREAMING_SNAKE_CASE =[] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ ) our_model.load_state_dict(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =( our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state ) SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1] assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384 # we can use the convnext one SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, ) print(F'Pushed {name}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE =1000 SCREAMING_SNAKE_CASE =(1, num_labels) SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =json.load(open(cached_download(hf_hub_url(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ) ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 
1512], groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } SCREAMING_SNAKE_CASE =NameToOurModelFuncMap() SCREAMING_SNAKE_CASE =NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowerCAmelCase_, lowerCAmelCase_ ) -> Tuple[nn.Module, Dict]: SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' ) SCREAMING_SNAKE_CASE =model_func() # check if we have a head, if yes add it SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model'] SCREAMING_SNAKE_CASE =model_state_dict['trunk'] model.load_state_dict(lowerCAmelCase_ ) return model.eval(), model_state_dict["heads"] # pretrained SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', 
lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), ) # IN1K finetuned SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), ) if model_name: convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will be converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) _lowerCamelCase =parser.parse_args() _lowerCamelCase =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
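A self-contained sketch of the functools.partial currying used for the per-model configs above; DemoConfig is a hypothetical stand-in for the curried ImageNet-pretrained config class (which the script builds with the id2label maps loaded from the huggingface/label-files dataset):

from functools import partial

class DemoConfig:  # hypothetical stand-in for the curried config class
    def __init__(self, depths, hidden_sizes, groups_width, num_labels=1000, id2label=None):
        self.depths, self.hidden_sizes = depths, hidden_sizes
        self.groups_width, self.num_labels = groups_width, num_labels
        self.id2label = id2label or {}

# Bake the shared classification-head arguments in once...
ImageNetDemoConfig = partial(DemoConfig, num_labels=1000, id2label={0: 'tench'})
# ...then each architecture entry only spells out its geometry.
cfg = ImageNetDemoConfig(depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40)
assert cfg.num_labels == 1000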
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCamelCase =16 _lowerCamelCase =32 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE =load_dataset('glue', 'mrpc' ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE =datasets.map( lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label', 'labels' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE =16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE =8 else: SCREAMING_SNAKE_CASE =None return tokenizer.pad( lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =DataLoader( tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCamelCase =mocked_dataloaders # noqa: F811 def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1": SCREAMING_SNAKE_CASE =2 # Initialize accelerator SCREAMING_SNAKE_CASE =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE =config['lr'] SCREAMING_SNAKE_CASE =int(config['num_epochs'] ) SCREAMING_SNAKE_CASE =int(config['seed'] ) SCREAMING_SNAKE_CASE =int(config['batch_size'] ) SCREAMING_SNAKE_CASE =evaluate.load('glue', 'mrpc' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE =model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE =AdamW(params=model.parameters(), lr=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ ) # Instantiate scheduler SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.prepare( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowerCAmelCase_, references=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:', lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.', ) parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowerCAmelCase_, lowerCAmelCase_ ) if __name__ == "__main__": main()
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) _lowerCamelCase =logging.getLogger(__name__) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =git.Repo(search_parent_directories=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'repo_id': str(lowerCAmelCase_ ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(lowerCAmelCase_, 'git_log.json' ), 'w' ) as f: json.dump(lowerCAmelCase_, lowerCAmelCase_, indent=4 ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" if params.n_gpu <= 0: SCREAMING_SNAKE_CASE =0 SCREAMING_SNAKE_CASE =-1 SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 SCREAMING_SNAKE_CASE =int(os.environ['WORLD_SIZE'] ) SCREAMING_SNAKE_CASE =int(os.environ['N_GPU_NODE'] ) SCREAMING_SNAKE_CASE =int(os.environ['RANK'] ) # number of nodes / node ID SCREAMING_SNAKE_CASE =params.world_size // params.n_gpu_per_node SCREAMING_SNAKE_CASE =params.global_rank // params.n_gpu_per_node SCREAMING_SNAKE_CASE =True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 SCREAMING_SNAKE_CASE =1 SCREAMING_SNAKE_CASE =0 SCREAMING_SNAKE_CASE =0 SCREAMING_SNAKE_CASE =0 SCREAMING_SNAKE_CASE =1 SCREAMING_SNAKE_CASE =1 SCREAMING_SNAKE_CASE =False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode SCREAMING_SNAKE_CASE =params.node_id == 0 and params.local_rank == 0 SCREAMING_SNAKE_CASE =params.n_nodes > 1 # summary SCREAMING_SNAKE_CASE =F'--- Global rank: {params.global_rank} - ' logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://', backend='nccl', ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
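A worked example of the rank bookkeeping performed above, reduced to pure arithmetic (in the script, WORLD_SIZE, N_GPU_NODE, and RANK come from the launcher's environment; the values below are illustrative):

# 2 nodes x 8 GPUs each, looking at the process with global rank 11
world_size, n_gpu_per_node, global_rank = 16, 8, 11
n_nodes = world_size // n_gpu_per_node     # 2
node_id = global_rank // n_gpu_per_node    # node 1
local_rank = global_rank % n_gpu_per_node  # GPU 3 on that node (normally supplied by the launcher)
assert (n_nodes, node_id, local_rank) == (2, 1, 3)
assert 0 <= local_rank <= global_rank < world_size  # mirrors the script's own sanity check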
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in ``sentence`` that is longer than four characters."""
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
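A quick worked example of the rule above: only words of five or more characters are reversed, so the scrambled words flip back while short words pass through unchanged.

assert reverse_long_words('Hey wollef sroirraw') == 'Hey fellow warriors'
assert reverse_long_words('one two three') == 'one two eerht'  # only 'three' exceeds four characters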
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_lowerCamelCase ={"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    _lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json _lowerCamelCase ="sshleifer/mar_enro_6_3_student" class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Union[str, Any] ): super().setUp() SCREAMING_SNAKE_CASE =cached_path( 'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,) SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k' @slow @require_torch_gpu def _lowerCAmelCase ( self : Optional[int] ): MarianMTModel.from_pretrained(snake_case ) @slow @require_torch_gpu def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE ={ '$MAX_LEN': 64, '$BS': 64, '$GAS': 1, '$ENRO_DIR': self.data_dir, 'facebook/mbart-large-cc25': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '--learning_rate=3e-5': '--learning_rate 3e-4', '--num_train_epochs 6': '--num_train_epochs 1', } # Clean up bash script SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip() SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) ) SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split() # XXX: args.gpus > 1 : handle multi_gpu in the future SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args with patch.object(snake_case ,'argv' ,snake_case ): SCREAMING_SNAKE_CASE =argparse.ArgumentParser() SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case ) SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() ) SCREAMING_SNAKE_CASE =parser.parse_args() SCREAMING_SNAKE_CASE =main(snake_case ) # Check metrics SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE =metrics['val'][0] SCREAMING_SNAKE_CASE =metrics['val'][-1] self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case ) self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['val_avg_bleu'] ,17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE =os.listdir(snake_case ) SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0] SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case ) SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' ) SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1 class a_ ( lowerCamelCase_ ): """simple docstring""" @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro' SCREAMING_SNAKE_CASE ={ '--fp16_opt_level=O1': '', '$MAX_LEN': 128, '$BS': 16, '$GAS': 1, '$ENRO_DIR': data_dir, '$m': 'sshleifer/student_marian_en_ro_6_1', 'val_check_interval=0.25': 'val_check_interval=1.0', } # Clean up bash script SCREAMING_SNAKE_CASE =( (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip() ) SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' ) SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' ) for k, v in env_vars_to_replace.items(): SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) ) SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' ) SCREAMING_SNAKE_CASE =6 SCREAMING_SNAKE_CASE =( ['distillation.py'] + bash_script.split() + [ f'--output_dir={output_dir}', '--gpus=1', '--learning_rate=1e-3', f'--num_train_epochs={epochs}', '--warmup_steps=10', '--val_check_interval=1.0', '--do_predict', ] ) with patch.object(snake_case ,'argv' ,snake_case ): SCREAMING_SNAKE_CASE =argparse.ArgumentParser() SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case ) SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() ) SCREAMING_SNAKE_CASE =parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu SCREAMING_SNAKE_CASE =distill_main(snake_case ) # Check metrics SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path ) SCREAMING_SNAKE_CASE =metrics['val'][0] SCREAMING_SNAKE_CASE =metrics['val'][-1] assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case ) # check lightning ckpt can be loaded and has a reasonable statedict SCREAMING_SNAKE_CASE =os.listdir(snake_case ) SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0] SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case ) SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' ) SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    # Left-pad with zeros until the length is a multiple of three bits.
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3] for index in range(len(bin_string)) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
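A worked pass through the converter above: '111100' is already a multiple of three bits, so it splits into ['111', '100']; 0b111 is 7 and 0b100 is 4, giving '74'. An independent cross-check with Python's built-ins:

assert bin_to_octal('111100') == '74'
assert oct(int('111100', 2))[2:] == '74'  # same answer via int()/oct()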
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json", } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_vision_model' def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,): super().__init__(**snake_case ) SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =patch_size SCREAMING_SNAKE_CASE =image_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =attention_dropout SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =qkv_bias @classmethod def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_qformer' def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,): super().__init__(pad_token_id=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =position_embedding_type SCREAMING_SNAKE_CASE =cross_attention_frequency SCREAMING_SNAKE_CASE =encoder_hidden_size @classmethod def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip-2' __UpperCAmelCase = True def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ): super().__init__(**snake_case ) if vision_config is None: SCREAMING_SNAKE_CASE ={} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: SCREAMING_SNAKE_CASE ={} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: SCREAMING_SNAKE_CASE ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case ) SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case ) SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt' SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case ) SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder SCREAMING_SNAKE_CASE =num_query_tokens SCREAMING_SNAKE_CASE =self.vision_config.hidden_size SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES SCREAMING_SNAKE_CASE =1.0 SCREAMING_SNAKE_CASE =0.02 @classmethod def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE =self.vision_config.to_dict() SCREAMING_SNAKE_CASE =self.qformer_config.to_dict() SCREAMING_SNAKE_CASE =self.text_config.to_dict() SCREAMING_SNAKE_CASE =self.__class__.model_type return output
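For orientation, a minimal sketch of composing the three sub-configs through the upstream transformers API (Blip2Config, Blip2VisionConfig, and Blip2QFormerConfig are the un-mangled names of the classes above; treat the exact import surface as an assumption about the installed version):

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

vision_config = Blip2VisionConfig()   # defaults mirror the __init__ arguments above
qformer_config = Blip2QFormerConfig()
text_config = OPTConfig()
config = Blip2Config.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config, num_query_tokens=32
)
assert config.num_query_tokens == 32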
def is_power_of_two(number: int) -> bool:
    """Check whether ``number`` is a power of two via the n & (n - 1) trick.

    Note that 0 also satisfies the check, so callers who need strict powers
    of two should special-case it.
    """
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
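The bit trick above works because a positive power of two has exactly one set bit, and subtracting one clears that bit while setting every lower bit, so the AND of the two is zero; any other positive number keeps at least one overlapping bit. A small table check:

for n, expected in [(1, True), (2, True), (3, False), (8, True), (12, False), (64, True)]:
    assert is_power_of_two(n) is expected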
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" _lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" _lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references, one for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1.
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ] ,) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : str ,snake_case : List[str]=None ,snake_case : str=None ,snake_case : int=None ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]="auto" ,snake_case : List[str]=-1 ,snake_case : Union[str, Any]=0.9 ,snake_case : Tuple=5 ,snake_case : Union[str, Any]=500 ,snake_case : Union[str, Any]="gpt2-large" ,snake_case : Union[str, Any]=-1 ,snake_case : Optional[Any]=1024 ,snake_case : Optional[Any]=25 ,snake_case : List[str]=5 ,snake_case : List[str]=True ,snake_case : Optional[Any]=25 ,): SCREAMING_SNAKE_CASE =compute_mauve( p_text=snake_case ,q_text=snake_case ,p_features=snake_case ,q_features=snake_case ,p_tokens=snake_case ,q_tokens=snake_case ,num_buckets=snake_case ,pca_max_data=snake_case ,kmeans_explained_var=snake_case ,kmeans_num_redo=snake_case ,kmeans_max_iter=snake_case ,featurize_model_name=snake_case ,device_id=snake_case ,max_text_length=snake_case ,divergence_curve_discretization_size=snake_case ,mauve_scaling_factor=snake_case ,verbose=snake_case ,seed=snake_case ,) return out
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase =get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class a_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = SpeechTaTokenizer __UpperCAmelCase = False __UpperCAmelCase = True def _lowerCAmelCase ( self : Optional[Any] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE =SpeechTaTokenizer(snake_case ) SCREAMING_SNAKE_CASE =AddedToken('<mask>' ,lstrip=snake_case ,rstrip=snake_case ) SCREAMING_SNAKE_CASE =mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[Any] ): SCREAMING_SNAKE_CASE ='this is a test' SCREAMING_SNAKE_CASE ='this is a test' return input_text, output_text def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Any ,snake_case : int=False ,snake_case : str=20 ,snake_case : Any=5 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.get_input_output_texts(snake_case ) SCREAMING_SNAKE_CASE =tokenizer.encode(snake_case ,add_special_tokens=snake_case ) SCREAMING_SNAKE_CASE =tokenizer.decode(snake_case ,clean_up_tokenization_spaces=snake_case ) return text, ids def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE ='<pad>' SCREAMING_SNAKE_CASE =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) ,snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) ,snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-4] ,'œ' ) self.assertEqual(vocab_keys[-2] ,'<mask>' ) self.assertEqual(vocab_keys[-1] ,'<ctc_blank>' ) self.assertEqual(len(snake_case ) ,81 ) def _lowerCAmelCase ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size ,79 ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.get_tokenizers(do_lower_case=snake_case ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): SCREAMING_SNAKE_CASE =tokenizer.vocab_size SCREAMING_SNAKE_CASE =len(snake_case ) self.assertNotEqual(snake_case ,0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE =['aaaaa bbbbbb', 'cccccccccdddddddd'] SCREAMING_SNAKE_CASE =tokenizer.add_tokens(snake_case ) SCREAMING_SNAKE_CASE =tokenizer.vocab_size SCREAMING_SNAKE_CASE =len(snake_case ) self.assertNotEqual(snake_case ,0 ) self.assertEqual(snake_case ,snake_case ) self.assertEqual(snake_case ,len(snake_case ) ) self.assertEqual(snake_case ,all_size + len(snake_case ) ) SCREAMING_SNAKE_CASE =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=snake_case ) self.assertGreaterEqual(len(snake_case ) ,4 ) self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE ={'eos_token': 
'>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} SCREAMING_SNAKE_CASE =tokenizer.add_special_tokens(snake_case ) SCREAMING_SNAKE_CASE =tokenizer.vocab_size SCREAMING_SNAKE_CASE =len(snake_case ) self.assertNotEqual(snake_case ,0 ) self.assertEqual(snake_case ,snake_case ) self.assertEqual(snake_case ,len(snake_case ) ) self.assertEqual(snake_case ,all_size_a + len(snake_case ) ) SCREAMING_SNAKE_CASE =tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=snake_case ) self.assertGreaterEqual(len(snake_case ) ,6 ) self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] ,tokens[1] ) self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] ,tokens[-4] ) self.assertEqual(tokens[0] ,tokenizer.eos_token_id ) self.assertEqual(tokens[-3] ,tokenizer.pad_token_id ) def _lowerCAmelCase ( self : Tuple ): pass def _lowerCAmelCase ( self : List[str] ): pass def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.get_tokenizer() SCREAMING_SNAKE_CASE =tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(snake_case ,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,) SCREAMING_SNAKE_CASE =tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( snake_case ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) SCREAMING_SNAKE_CASE =tokenizer.convert_tokens_to_ids(snake_case ) # fmt: off self.assertListEqual(snake_case ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE =tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def _lowerCAmelCase ( self : Tuple ): # Use custom sequence because this tokenizer does not handle numbers. SCREAMING_SNAKE_CASE =[ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off SCREAMING_SNAKE_CASE ={ 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case ,model_name='microsoft/speecht5_asr' ,revision='c5ef64c71905caeccde0e4462ef3f9077224c524' ,sequences=snake_case ,)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase =logging.get_logger(__name__)

_lowerCamelCase ={
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class a_ ( lowerCamelCase_ ):
    """simple docstring"""

    __UpperCAmelCase = 'vit_mae'

    def __init__( self : Union[str, Any] ,snake_case : Any=768 ,snake_case : List[str]=12 ,snake_case : Optional[int]=12 ,snake_case : int=3072 ,snake_case : List[Any]="gelu" ,snake_case : str=0.0 ,snake_case : str=0.0 ,snake_case : Optional[Any]=0.02 ,snake_case : Dict=1e-12 ,snake_case : List[str]=224 ,snake_case : Any=16 ,snake_case : Any=3 ,snake_case : Tuple=True ,snake_case : List[Any]=16 ,snake_case : List[str]=512 ,snake_case : List[Any]=8 ,snake_case : Dict=2048 ,snake_case : Union[str, Any]=0.75 ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
        super().__init__(**snake_case )

        SCREAMING_SNAKE_CASE =hidden_size
        SCREAMING_SNAKE_CASE =num_hidden_layers
        SCREAMING_SNAKE_CASE =num_attention_heads
        SCREAMING_SNAKE_CASE =intermediate_size
        SCREAMING_SNAKE_CASE =hidden_act
        SCREAMING_SNAKE_CASE =hidden_dropout_prob
        SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE =initializer_range
        SCREAMING_SNAKE_CASE =layer_norm_eps
        SCREAMING_SNAKE_CASE =image_size
        SCREAMING_SNAKE_CASE =patch_size
        SCREAMING_SNAKE_CASE =num_channels
        SCREAMING_SNAKE_CASE =qkv_bias
        SCREAMING_SNAKE_CASE =decoder_num_attention_heads
        SCREAMING_SNAKE_CASE =decoder_hidden_size
        SCREAMING_SNAKE_CASE =decoder_num_hidden_layers
        SCREAMING_SNAKE_CASE =decoder_intermediate_size
        SCREAMING_SNAKE_CASE =mask_ratio
        SCREAMING_SNAKE_CASE =norm_pix_loss
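A minimal usage sketch against the upstream transformers API (ViTMAEConfig is the un-mangled name of the class above; the import is an assumption about the installed version):

from transformers import ViTMAEConfig

config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)  # defaults made explicit
assert config.hidden_size == 768 and config.decoder_hidden_size == 512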
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
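# Standalone illustration of the special-token mask built in mask_with_collator
# above: padding, CLS and SEP positions are excluded from MLM masking. The token
# ids here are made up for the example (0=CLS, 2=SEP, following the RoBERTa
# convention); tf is already imported at the top of this script.
example_input_ids = tf.constant([[0, 50, 60, 2, 1]])
example_attention_mask = tf.constant([[1, 1, 1, 1, 0]])
example_special_tokens_mask = (
    ~tf.cast(example_attention_mask, tf.bool)
    | (example_input_ids == 0)
    | (example_input_ids == 2)
)
print(example_special_tokens_mask.numpy())  # [[ True False False  True  True]]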
334
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
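# Seen from the consumer's side (a sketch, assuming a transformers installation):
# importing the package stays cheap, and the first attribute access on the lazily
# loaded module triggers the real import of the submodule defining the class.
from transformers.models import ernie

config_cls = ernie.ErnieConfig  # imports configuration_ernie on first access
print(config_cls.model_type)  # "ernie"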
334
1
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2
    and the VE column of Table 1 from [1] for reference.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    https://arxiv.org/abs/2206.00364
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    https://arxiv.org/abs/2011.13456
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
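# Minimal usage sketch. "google/ncsnpp-celebahq-256" is the checkpoint the diffusers
# documentation pairs with this sampler, but treat the combination here as
# illustrative rather than tested; any UNet2DModel with a compatible sample_size follows
# the same pattern.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")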
334
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : Any ): with self.assertRaises(snake_case ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() ) def _lowerCAmelCase ( self : Union[str, Any] ): with self.assertRaises(snake_case ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) ) def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : int ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) ) self.assertEqual(arr.type ,pa.intaa() ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) ) self.assertEqual(arr.type ,pa.string() ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) ) def _lowerCAmelCase ( self : Dict ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) ) self.assertEqual(arr.type ,pa.string() ) @require_pil def _lowerCAmelCase ( self : int ): import PIL.Image SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) ) with patch( 'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects: SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1] self.assertIn('optimize_list_casting' ,snake_case ) self.assertFalse(kwargs['optimize_list_casting'] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_, pa.Buffer ) else pa.memory_map(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 
2]} del pa_table @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} ) with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer: writer.write({'labels': 0} ) writer.write({'labels': 1} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =f.read_all() SCREAMING_SNAKE_CASE =pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(lowerCAmelCase_ ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: with pytest.raises(lowerCAmelCase_ ): writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() @pytest.mark.parametrize('writer_batch_size', [None, 2, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: with pytest.raises(lowerCAmelCase_ ): writer.write({'col_1': 'foo', 'col_2': 1}, key=10 ) writer.write({'col_1': 'bar', 'col_2': 2}, key=10 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() @pytest.mark.parametrize('writer_batch_size', [None, 2, 10] ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer: writer.write({'col_1': 'foo', 'col_2': 1}, key=1 ) writer.write({'col_1': 'bar', 'col_2': 2}, key=2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) 
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) writer.write_batch({'col_1': [], 'col_2': []} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('writer_batch_size', [None, 1, 10] ) @pytest.mark.parametrize( 'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer: writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) ) writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def snake_case__ ( ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()} SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' ) with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer: writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(lowerCAmelCase_, 
metadata=writer._schema.metadata ) _check_output(lowerCAmelCase_, 1 ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" if pa.types.is_list(lowerCAmelCase_ ): return get_base_dtype(arr_type.value_type ) else: return arr_type def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if isinstance(lst[0], lowerCAmelCase_ ): change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE =value @pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] ) @pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( 'col, expected_dtype', [ ('attention_mask', pa.inta()), ('special_tokens_mask', pa.inta()), ('token_type_ids', pa.inta()), ('input_ids', pa.intaa()), ('other', pa.intaa()), ], ) @pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize('raise_exception', [False, True] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' ) try: with ArrowWriter(path=lowerCAmelCase_ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow' with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs, type(lowerCAmelCase_ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(lowerCAmelCase_ ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ParquetWriter(stream=lowerCAmelCase_ ) as writer: writer.write({'col_1': 'foo', 'col_2': 1} ) writer.write({'col_1': 'bar', 'col_2': 2} ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize() assert num_examples == 2 assert num_bytes > 0 SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize('embed_local_files', [False, True] ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" import PIL.Image SCREAMING_SNAKE_CASE =str(tmp_path / 
'test_image_rgb.jpg' ) PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' ) SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ParquetWriter( stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer: writer.write({'image': image_path} ) writer.finalize() SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =pa_table.to_pydict() if embed_local_files: assert isinstance(out['image'][0]['path'], lowerCAmelCase_ ) with open(lowerCAmelCase_, 'rb' ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] ) SCREAMING_SNAKE_CASE =pa.BufferOutputStream() with ArrowWriter(stream=lowerCAmelCase_ ) as writer: writer._build_writer(inferred_schema=lowerCAmelCase_ ) assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
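# The core pattern exercised throughout these tests, in isolation: stream rows into
# an in-memory Arrow buffer, finalize, and read the resulting table back. pa and
# ArrowWriter are already imported at the top of this module.
stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()
table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}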
334
1
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Find the median of the combined elements of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
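# The sort above costs O((m+n) log(m+n)); when both inputs are already sorted, a
# linear merge is enough. A sketch using heapq.merge (assumes sorted inputs):
import heapq


def median_of_two_sorted_arrays(nums1: list[float], nums2: list[float]) -> float:
    merged = list(heapq.merge(nums1, nums2))  # O(m + n) for sorted inputs
    div, mod = divmod(len(merged), 2)
    return merged[div] if mod == 1 else (merged[div] + merged[div - 1]) / 2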
334
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
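# The same gate derived arithmetically via De Morgan's law, as a cross-check:
# a OR b == NOT(NOT a AND NOT b).
def or_gate_de_morgan(input_1: int, input_2: int) -> int:
    return 1 - (1 - input_1) * (1 - input_2)


assert all(or_gate(a, b) == or_gate_de_morgan(a, b) for a in (0, 1) for b in (0, 1))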
334
1
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting every gene in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match of the target is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #        max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
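# A quick convergence check on a much shorter target (a sketch; with the module
# defaults this typically converges in well under a second):
short_generation, short_population, short_best = basic("HELLO WORLD", list(" DEHLORW"), debug=False)
assert short_best == "HELLO WORLD"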
334
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Vocabulary entries are treated as special (no-split) tokens, matching the upstream behavior
        return super()._add_tokens(new_tokens, special_tokens=True)
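# Round-trip sketch, assuming network access to the facebook/esm2_t6_8M_UR50D vocab;
# ESM's vocabulary is one character per residue, so the no-split trie splits raw
# protein strings character by character.
from transformers import EsmTokenizer

esm_tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
protein_ids = esm_tokenizer("MKTAYIAK")["input_ids"]
print(esm_tokenizer.convert_ids_to_tokens(protein_ids))
# expected: ['<cls>', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K', '<eos>']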
334
1
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer. ByT5 simply uses raw bytes in UTF-8 encoding."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
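# The id layout implemented above: ids 0-2 are <pad>/</s>/<unk>, ids 3-258 are the
# raw byte values (ord(byte) + 3), and the extra_ids sit at the top of the vocabulary.
from transformers import ByT5Tokenizer

byt5_tokenizer = ByT5Tokenizer()
print(byt5_tokenizer("hi")["input_ids"])  # [107, 108, 1]: ord('h')+3, ord('i')+3, </s>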
334
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger() @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def _lowerCAmelCase ( self : Any ,snake_case : Any ,snake_case : Tensor ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self : int ,snake_case : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _lowerCAmelCase ( self : Tuple ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def __call__( self : int ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( f'Numbers of operations are different. Source module has {len(snake_case )} operations while' f' destination module has {len(snake_case )}.' ) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ): """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): SCREAMING_SNAKE_CASE =timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ResNetForImageClassification(lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) ) module_transfer(lowerCAmelCase_ ) assert torch.allclose(from_model(lowerCAmelCase_ ), our_model(lowerCAmelCase_ ).logits ), "The model logits don't match the original one." 
SCREAMING_SNAKE_CASE =F'resnet{"-".join(name.split("resnet" ) )}' print(lowerCAmelCase_ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, ) # we can use the convnext one SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, ) print(F'Pushed {checkpoint_name}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE =1000 SCREAMING_SNAKE_CASE =(1, num_labels) SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ), } if model_name: convert_weight_and_push(lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) _lowerCamelCase =parser.parse_args() _lowerCamelCase =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
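# The weight-transfer idea behind ModuleTransfer, reduced to its essentials: copy a
# source module's state dict into an architecturally identical destination, then
# verify numerically. Toy modules for illustration only; torch and nn are imported above.
toy_src, toy_dest = nn.Linear(4, 4), nn.Linear(4, 4)
toy_dest.load_state_dict(toy_src.state_dict())
toy_input = torch.randn(1, 4)
assert torch.allclose(toy_src(toy_input), toy_dest(toy_input))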
334
1
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
334
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI.

    This only selects the runs triggered by the `schedule` event on the `main` branch.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
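# Example driver (the artifact name and output directory are made up for
# illustration; pass a GitHub token with actions:read scope for non-public data):
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ.get("GITHUB_TOKEN")
    )
    print(sorted(reports))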
334
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
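# Instantiation sketch: cutoffs partition the vocabulary for the adaptive softmax,
# and tie_projs is derived from proj_share_all_but_first as shown above.
small_config = TransfoXLConfig(vocab_size=1000, cutoffs=[200, 500], d_model=128, n_layer=2)
print(small_config.tie_projs)  # [False, True, True]
print(small_config.max_position_embeddings)  # -1: Transformer-XL has no fixed length limit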
334
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class a_ ( lowerCamelCase_ ): """simple docstring""" def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,): SCREAMING_SNAKE_CASE =parent SCREAMING_SNAKE_CASE =batch_size SCREAMING_SNAKE_CASE =seq_length SCREAMING_SNAKE_CASE =is_training SCREAMING_SNAKE_CASE =use_input_mask SCREAMING_SNAKE_CASE =use_token_type_ids SCREAMING_SNAKE_CASE =use_labels SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =type_vocab_size SCREAMING_SNAKE_CASE =type_sequence_label_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =num_choices SCREAMING_SNAKE_CASE =scope SCREAMING_SNAKE_CASE =q_groups SCREAMING_SNAKE_CASE =k_groups SCREAMING_SNAKE_CASE =v_groups SCREAMING_SNAKE_CASE =post_attention_groups SCREAMING_SNAKE_CASE =intermediate_groups SCREAMING_SNAKE_CASE =output_groups def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE =None if self.use_input_mask: SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None if self.use_labels: SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices ) SCREAMING_SNAKE_CASE =self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : Optional[int] ): return SqueezeBertConfig( embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act 
,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,) def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ): SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,snake_case ) SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ): SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ): SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model( snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ): SCREAMING_SNAKE_CASE =self.num_labels SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ): SCREAMING_SNAKE_CASE =self.num_labels SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.num_choices SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() SCREAMING_SNAKE_CASE =model( snake_case ,attention_mask=snake_case ,labels=snake_case ,) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __UpperCAmelCase = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = False def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self ) SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 ) def _lowerCAmelCase ( self : List[str] ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*snake_case ) def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case ) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case ) def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case ) def _lowerCAmelCase ( self : int ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case ) @slow def _lowerCAmelCase ( self : str ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @require_sentencepiece @require_tokenizers @require_torch class a_ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) SCREAMING_SNAKE_CASE =model(snake_case )[0] SCREAMING_SNAKE_CASE =torch.Size((1, 3) ) self.assertEqual(output.shape ,snake_case ) SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] ) self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
334
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
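A brief usage sketch of the lazy-import table above: the heavy model classes only materialize on first access, for example when composing an encoder-decoder pair. The checkpoint names below are illustrative assumptions, not part of this module.

# Minimal sketch, assuming a torch backend and network access; checkpoints are illustrative.
from transformers import VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k",  # vision encoder
    "bert-base-uncased",  # text decoder
)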
334
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = None __UpperCAmelCase = None @property def _lowerCAmelCase ( self : List[Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case ,'feature_size' ) ) self.assertTrue(hasattr(snake_case ,'sampling_rate' ) ) self.assertTrue(hasattr(snake_case ,'padding_value' ) ) def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _lowerCAmelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' ) SCREAMING_SNAKE_CASE =processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE =batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ): def _inputs_have_equal_length(snake_case : Dict ): SCREAMING_SNAKE_CASE =len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case : str ,snake_case : Dict ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case ,snake_case ): if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ): return False return True SCREAMING_SNAKE_CASE 
=self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size # test padding for List[int] + numpy SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='max_length' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - 
padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ): def _inputs_have_equal_length(snake_case : str ): SCREAMING_SNAKE_CASE =len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case ,snake_case ): if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ): return False return True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) # truncate to smallest SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to smallest with np SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to middle SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' ) SCREAMING_SNAKE_CASE =input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(len(input_a[-1] ) == 
len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(snake_case ): feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE =12 SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,) SCREAMING_SNAKE_CASE =input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of SCREAMING_SNAKE_CASE =len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) def _lowerCAmelCase ( self : Optional[int] ): self._check_padding(numpify=snake_case ) def _lowerCAmelCase ( self : Tuple ): self._check_padding(numpify=snake_case ) def _lowerCAmelCase ( self : List[str] ): self._check_truncation(numpify=snake_case ) def _lowerCAmelCase ( self : int ): self._check_truncation(numpify=snake_case ) @require_torch def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.feat_extract_dict SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case ) SCREAMING_SNAKE_CASE 
=self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs] SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' ) self.assertIn('attention_mask' ,snake_case ) self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.feat_extract_dict SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case ) SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs] SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE =min(snake_case ) SCREAMING_SNAKE_CASE =feat_extract.pad( snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' ) self.assertIn('attention_mask' ,snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
334
1
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _lowerCamelCase =random.Random() def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_=1.0, lowerCAmelCase_=None, lowerCAmelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE =global_rng SCREAMING_SNAKE_CASE =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class a_ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict ,snake_case : str ,snake_case : List[Any]=7 ,snake_case : List[Any]=400 ,snake_case : int=2000 ,snake_case : Union[str, Any]=1 ,snake_case : str=0.0 ,snake_case : Union[str, Any]=16000 ,snake_case : List[Any]=True ,snake_case : Tuple=True ,): SCREAMING_SNAKE_CASE =parent SCREAMING_SNAKE_CASE =batch_size SCREAMING_SNAKE_CASE =min_seq_length SCREAMING_SNAKE_CASE =max_seq_length SCREAMING_SNAKE_CASE =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE =feature_size SCREAMING_SNAKE_CASE =padding_value SCREAMING_SNAKE_CASE =sampling_rate SCREAMING_SNAKE_CASE =return_attention_mask SCREAMING_SNAKE_CASE =do_normalize def _lowerCAmelCase ( self : Any ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCAmelCase ( self : str ,snake_case : Optional[Any]=False ,snake_case : Union[str, Any]=False ): def _flatten(snake_case : Optional[Any] ): return list(itertools.chain(*snake_case ) ) if equal_length: SCREAMING_SNAKE_CASE =floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE =[ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE =[np.asarray(snake_case ) for x in speech_inputs] return speech_inputs class a_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = WavaVecaFeatureExtractor def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractionTester(self ) def _lowerCAmelCase ( self : List[str] ,snake_case : str ): self.assertTrue(np.all(np.mean(snake_case ,axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(snake_case ,axis=0 ) - 1 ) < 1e-3 ) ) def _lowerCAmelCase ( self : Dict ): # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE =[np.asarray(snake_case ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE =feat_extract(speech_inputs[0] ,return_tensors='np' ).input_values SCREAMING_SNAKE_CASE =feat_extract(np_speech_inputs[0] ,return_tensors='np' ).input_values self.assertTrue(np.allclose(snake_case ,snake_case ,atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE =feat_extract(snake_case ,return_tensors='np' ).input_values SCREAMING_SNAKE_CASE =feat_extract(snake_case ,return_tensors='np' ).input_values for 
enc_seq_a, enc_seq_a in zip(snake_case ,snake_case ): self.assertTrue(np.allclose(snake_case ,snake_case ,atol=1e-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE =np.asarray(snake_case ) SCREAMING_SNAKE_CASE =feat_extract(snake_case ,return_tensors='np' ).input_values SCREAMING_SNAKE_CASE =feat_extract(snake_case ,return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(snake_case ,snake_case ): self.assertTrue(np.allclose(snake_case ,snake_case ,atol=1e-3 ) ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE =['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE =[None, 1600, None] for max_length, padding in zip(snake_case ,snake_case ): SCREAMING_SNAKE_CASE =feat_extract(snake_case ,padding=snake_case ,max_length=snake_case ,return_tensors='np' ) SCREAMING_SNAKE_CASE =processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE =range(800 ,1400 ,200 ) SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE =['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE =[None, 1600, None] for max_length, padding in zip(snake_case ,snake_case ): SCREAMING_SNAKE_CASE =feat_extract(snake_case ,max_length=snake_case ,padding=snake_case ) SCREAMING_SNAKE_CASE =processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE =feat_extract( snake_case ,truncation=snake_case ,max_length=1000 ,padding='max_length' ,return_tensors='np' ) SCREAMING_SNAKE_CASE =processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE =feat_extract( snake_case ,truncation=snake_case ,max_length=1000 ,padding='longest' ,return_tensors='np' ) SCREAMING_SNAKE_CASE =processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE =feat_extract( snake_case ,truncation=snake_case 
,max_length=2000 ,padding='longest' ,return_tensors='np' ) SCREAMING_SNAKE_CASE =processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def _lowerCAmelCase ( self : List[Any] ): import torch SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE =np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE =feature_extractor.pad([{'input_values': inputs}] ,return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE =feature_extractor.pad([{'input_values': inputs}] ,return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def _lowerCAmelCase ( self : List[Any] ): # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: SCREAMING_SNAKE_CASE =WavaVecaConfig.from_pretrained(snake_case ) SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(snake_case ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == 'layer' )
334
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that already match the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of the child with a random gene from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pair the parent with random mates and return the mutated offspring."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; return (generation, total evaluated, best string)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
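A quick usage sketch of the evolver above; the target and gene list here are illustrative, not taken from the original file.

# Minimal sketch, assuming the `basic` evolver defined above is in scope.
generation, evaluated, best = basic("HELLO WORLD", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ"), debug=False)
print(f"Matched {best!r} after {generation} generations ({evaluated} strings evaluated)")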
334
1
from __future__ import annotations

import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B.T, C]] with
    respect to A: returns C - B.T @ A^(-1) @ B. A precomputed (pseudo-)inverse
    of A can be supplied through `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A deliberately has fewer rows than B, so the call must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        # B and C deliberately disagree on column count, so the call must raise.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
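The first test above leans on the determinant identity det([[A, B], [B.T, C]]) = det(A) * det(S). A hand-checkable sketch follows; the matrix values are illustrative, and it assumes it runs in the same module as `schur_complement` above.

import numpy as np

a = np.array([[2.0, 0.0], [0.0, 2.0]])  # invertible 2x2 block, det(A) = 4
b = np.array([[1.0], [0.0]])            # 2x1 off-diagonal block
c = np.array([[3.0]])                   # 1x1 bottom-right block

s = schur_complement(a, b, c)           # C - B.T @ A^(-1) @ B = 3 - 0.5 = 2.5
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))  # 10.0 == 4 * 2.5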
334
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ): # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE =mock.Mock() SCREAMING_SNAKE_CASE =500 SCREAMING_SNAKE_CASE ={} SCREAMING_SNAKE_CASE =HTTPError SCREAMING_SNAKE_CASE ={} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head: SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _lowerCAmelCase ( self : Optional[Any] ): # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE =mock.Mock() SCREAMING_SNAKE_CASE =500 SCREAMING_SNAKE_CASE ={} SCREAMING_SNAKE_CASE =HTTPError SCREAMING_SNAKE_CASE ={} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head: SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def _lowerCAmelCase ( self : Union[str, Any] ): # This test is for deprecated behavior and can be removed in v5 try: SCREAMING_SNAKE_CASE =tempfile.mktemp() with open(snake_case ,'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case ) SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case ) finally: os.remove(snake_case ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' ,'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case ) SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def _lowerCAmelCase ( self : int ): # This test is for deprecated behavior and can be removed in v5 SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class a_ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def _lowerCAmelCase ( cls : List[Any] ): SCREAMING_SNAKE_CASE =TOKEN HfFolder.save_token(snake_case ) @classmethod def _lowerCAmelCase ( cls : Tuple ): try: delete_repo(token=cls._token ,repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def _lowerCAmelCase ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' ) with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE =BertTokenizer(snake_case ) tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def _lowerCAmelCase ( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' ) with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE =BertTokenizer(snake_case ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def _lowerCAmelCase ( self : str ): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' ) with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' ) with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case ) bert_tokenizer.save_pretrained(snake_case ) SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case ) tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token ) SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' ) SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' ) class a_ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def _lowerCAmelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE =Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] ) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] ) def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] ) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] ) def _lowerCAmelCase ( self : Optional[Any] ): # Even if the offsets are wrong, we necessarily output correct string # parts. SCREAMING_SNAKE_CASE =Trie() SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(snake_case ,['AB', 'C'] )
334
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
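Code that triggers the deprecation warning above migrates by swapping in the image processor directly; a minimal sketch, where the checkpoint name is an illustrative example.

# Minimal migration sketch; "facebook/flava-full" is an example checkpoint.
from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")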
334
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original VisualBERT weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
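The converter is normally driven through the argparse block above, but it can also be called directly; a minimal sketch, where the paths are placeholders and the checkpoint file must exist locally.

# Minimal sketch (paths are placeholders); assumes the module above is imported or in scope.
convert_visual_bert_checkpoint("vqa_fine_tuned.th", "./visualbert-vqa")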
334
1