code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : List[str] = """autoformer""" a__ : Optional[Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : int = 3 , **SCREAMING_SNAKE_CASE__ : Any , ) -> Union[str, Any]: # time series specific configuration __lowerCamelCase = prediction_length __lowerCamelCase = context_length if context_length is not None else prediction_length __lowerCamelCase = distribution_output __lowerCamelCase = loss __lowerCamelCase = input_size __lowerCamelCase = num_time_features __lowerCamelCase = lags_sequence __lowerCamelCase = scaling __lowerCamelCase = num_dynamic_real_features __lowerCamelCase = num_static_real_features __lowerCamelCase = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) __lowerCamelCase = cardinality else: __lowerCamelCase = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) __lowerCamelCase = embedding_dimension else: __lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __lowerCamelCase = num_parallel_samples # Transformer architecture configuration __lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features __lowerCamelCase = d_model __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_attention_heads __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_layers __lowerCamelCase = decoder_layers __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = use_cache # Autoformer __lowerCamelCase = label_length __lowerCamelCase = moving_average __lowerCamelCase = autocorrelation_factor super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __A ( self : List[str] ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
339
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
339
1
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Union[str, Any] = RobertaTokenizer a__ : Optional[Any] = RobertaTokenizerFast a__ : List[str] = True a__ : Optional[Any] = {"""cls_token""": """<s>"""} def __A ( self : Union[str, Any] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCamelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) def __A ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> int: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]: __lowerCamelCase = '''lower newer''' __lowerCamelCase = '''lower newer''' return input_text, output_text def __A ( self : Tuple ) -> List[Any]: __lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> Dict: __lowerCamelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = self.tokenizer_class.from_pretrained('''roberta-base''' ) __lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __A ( self : str ) -> Optional[Any]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = '''Encode this sequence.''' __lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing spaces after special tokens __lowerCamelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )} ) # mask token has a left space __lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''Encode <mask> sequence''' __lowerCamelCase = '''Encode <mask>sequence''' __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = encoded.index(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = encoded.index(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] ) -> Any: pass def __A ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''A, <mask> AllenNLP sentence.''' __lowerCamelCase = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def __A ( self : Optional[int] ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(post_processor_state['''trim_offsets'''] , SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowerCamelCase = f'''{text_of_1_token} {text_of_1_token}''' __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ) + 1, 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) @add_end_docstrings(__lowercase ) class lowerCAmelCase__ ( __lowercase ): def __init__( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE__ ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , '''vision''' ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, "Image.Image", List[Dict[str, Any]]] , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> int: if "text_queries" in kwargs: __lowerCamelCase = kwargs.pop('''text_queries''' ) if isinstance(SCREAMING_SNAKE_CASE__ , (str, Image.Image) ): __lowerCamelCase = {'''image''': image, '''candidate_labels''': candidate_labels} else: __lowerCamelCase = image __lowerCamelCase = super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return results def __A ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int: __lowerCamelCase = {} if "threshold" in kwargs: __lowerCamelCase = kwargs['''threshold'''] if "top_k" in kwargs: __lowerCamelCase = kwargs['''top_k'''] return {}, {}, postprocess_params def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: __lowerCamelCase = load_image(inputs['''image'''] ) __lowerCamelCase = inputs['''candidate_labels'''] if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = candidate_labels.split(''',''' ) __lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) __lowerCamelCase = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) yield { "is_last": i == len(SCREAMING_SNAKE_CASE__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: __lowerCamelCase = model_inputs.pop('''target_size''' ) __lowerCamelCase = model_inputs.pop('''candidate_label''' ) __lowerCamelCase = model_inputs.pop('''is_last''' ) __lowerCamelCase = self.model(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : str=None ) -> Union[str, Any]: __lowerCamelCase = [] for model_output in model_outputs: __lowerCamelCase = model_output['''candidate_label'''] __lowerCamelCase = BaseModelOutput(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE__ , threshold=SCREAMING_SNAKE_CASE__ , target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): __lowerCamelCase = outputs['''scores'''][index].item() __lowerCamelCase = self._get_bounding_box(outputs['''boxes'''][index][0] ) __lowerCamelCase = {'''score''': score, '''label''': label, '''box''': box} results.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x["score"] , reverse=SCREAMING_SNAKE_CASE__ ) if top_k: __lowerCamelCase = results[:top_k] return results def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : "torch.Tensor" ) -> Dict[str, int]: if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = box.int().tolist() __lowerCamelCase = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
339
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
339
1
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[int] = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = """t5""" a__ : List[Any] = ["""past_key_values"""] a__ : List[str] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=3_21_28 , SCREAMING_SNAKE_CASE__ : Any=5_12 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : str=8 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_28 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : int=1e-6 , SCREAMING_SNAKE_CASE__ : str=1.0 , SCREAMING_SNAKE_CASE__ : Optional[int]="relu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : int=1 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Optional[int]: __lowerCamelCase = vocab_size __lowerCamelCase = d_model __lowerCamelCase = d_kv __lowerCamelCase = d_ff __lowerCamelCase = num_layers __lowerCamelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowerCamelCase = num_heads __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = relative_attention_max_distance __lowerCamelCase = dropout_rate __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = initializer_factor __lowerCamelCase = feed_forward_proj __lowerCamelCase = use_cache __lowerCamelCase = self.feed_forward_proj.split('''-''' ) __lowerCamelCase = act_info[-1] __lowerCamelCase = act_info[0] == '''gated''' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __lowerCamelCase = '''gelu_new''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: __lowerCamelCase = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __lowerCamelCase = '''past_encoder_sequence + sequence''' __lowerCamelCase = {0: '''batch'''} __lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='''inputs''' ) return common_inputs @property def __A ( self : int ) -> int: return 13
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool: return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ) -> List[Any]: __lowerCamelCase = multiprocessing.Manager() __lowerCamelCase = manager.list() __lowerCamelCase = multiprocessing.Process(target=__lowerCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('''timed out''' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[str] ) -> Dict: with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil __lowerCamelCase = shutil.rmtree __lowerCamelCase = os.rmdir __lowerCamelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: __lowerCamelCase = {} with swallow_io(): with time_limit(__lowerCAmelCase ): exec(__lowerCAmelCase , __lowerCAmelCase ) result.append('''passed''' ) except TimeoutException: result.append('''timed out''' ) except BaseException as e: result.append(f'''failed: {e}''' ) # Needed for cleaning up. __lowerCamelCase = rmtree __lowerCamelCase = rmdir __lowerCamelCase = chdir @contextlib.contextmanager def __magic_name__ ( __lowerCAmelCase : Any ) -> str: def signal_handler(__lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ): raise TimeoutException('''Timed out!''' ) signal.setitimer(signal.ITIMER_REAL , __lowerCAmelCase ) signal.signal(signal.SIGALRM , __lowerCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __magic_name__ ( ) -> List[str]: __lowerCamelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(__lowerCAmelCase ): with contextlib.redirect_stderr(__lowerCAmelCase ): with redirect_stdin(__lowerCAmelCase ): yield @contextlib.contextmanager def __magic_name__ ( ) -> Tuple: with tempfile.TemporaryDirectory() as dirname: with chdir(__lowerCAmelCase ): yield dirname class lowerCAmelCase__ ( __lowercase ): pass class lowerCAmelCase__ ( io.StringIO ): def __A ( self : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]: raise OSError def __A ( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: raise OSError def __A ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : str ) -> str: raise OSError def __A ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: return False class lowerCAmelCase__ ( contextlib._RedirectStream ): # type: ignore a__ : Dict = """stdin""" @contextlib.contextmanager def __magic_name__ ( __lowerCAmelCase : List[str] ) -> List[Any]: if root == ".": yield return __lowerCamelCase = os.getcwd() os.chdir(__lowerCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Optional[int]=None ) -> str: if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins __lowerCamelCase = None __lowerCamelCase = None import os __lowerCamelCase = '''1''' __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None import shutil __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None import subprocess __lowerCamelCase = None # type: ignore __lowerCamelCase = None import sys __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
1
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Tuple = FlaxAutoencoderKL @property def __A ( self : Tuple ) -> Optional[int]: __lowerCamelCase = 4 __lowerCamelCase = 3 __lowerCamelCase = (32, 32) __lowerCamelCase = jax.random.PRNGKey(0 ) __lowerCamelCase = jax.random.uniform(SCREAMING_SNAKE_CASE__ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def __A ( self : Optional[int] ) -> List[Any]: __lowerCamelCase = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __lowerCamelCase = self.dummy_input return init_dict, inputs_dict
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. __lowerCamelCase , __lowerCamelCase = y, x % y return abs(__lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: try: __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __lowerCamelCase = int(nums[0] ) __lowerCamelCase = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
339
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Union[str, Any] ) -> str: __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str: return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> int: return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __A ( self : List[str] ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] ) -> Optional[int]: __lowerCamelCase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Tuple: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : Optional[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[str] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Optional[Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
339
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
339
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : int=10 , SCREAMING_SNAKE_CASE__ : Any=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE__ : str=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Tuple=None , ) -> int: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = embeddings_size __lowerCamelCase = hidden_sizes __lowerCamelCase = depths __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_act __lowerCamelCase = num_labels __lowerCamelCase = scope __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> List[Any]: __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def __A ( self : Union[str, Any] ) -> Tuple: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: __lowerCamelCase = TFResNetModel(config=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: __lowerCamelCase = self.num_labels __lowerCamelCase = TFResNetForImageClassification(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : int ) -> Union[str, Any]: __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ): a__ : str = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () a__ : Tuple = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) a__ : List[str] = False a__ : List[str] = False a__ : Dict = False a__ : Tuple = False a__ : str = False def __A ( self : Any ) -> int: __lowerCamelCase = TFResNetModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self : Union[str, Any] ) -> int: return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def __A ( self : Tuple ) -> Union[str, Any]: pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def __A ( self : Optional[int] ) -> Optional[int]: pass def __A ( self : List[str] ) -> Tuple: __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> List[str]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> int: def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ): __lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __lowerCamelCase = layer_type __lowerCamelCase = True check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Dict: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __A ( self : Dict ) -> int: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def __magic_name__ ( ) -> Union[str, Any]: __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __A ( self : int ) -> str: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __A ( self : str ) -> Optional[int]: __lowerCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''tf''' ) # forward pass __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ ) # verify the logits __lowerCamelCase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
339
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = """sew""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_68 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : int=30_72 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-5 , SCREAMING_SNAKE_CASE__ : int="group" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[str]=1_28 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=0.05 , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=10 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : str="mean" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_56 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Any=2 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layerdrop __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def __A ( self : Optional[int] ) -> Union[str, Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
339
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[Any] = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCAmelCase__ ( __lowercase ): a__ : Optional[Any] = """char""" a__ : int = """bpe""" a__ : Tuple = """wp""" SCREAMING_SNAKE_CASE__ : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCAmelCase__ ( __lowercase ): a__ : int = ["""image_processor""", """char_tokenizer"""] a__ : str = """ViTImageProcessor""" a__ : str = """MgpstrTokenizer""" def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: __lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = kwargs.pop('''feature_extractor''' ) __lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __lowerCamelCase = tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained('''gpt2''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __lowerCamelCase = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None: __lowerCamelCase = self.char_tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is None: return inputs elif images is None: return encodings else: __lowerCamelCase = encodings['''input_ids'''] return inputs def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = sequences __lowerCamelCase = char_preds.size(0 ) __lowerCamelCase , __lowerCamelCase = self._decode_helper(SCREAMING_SNAKE_CASE__ , '''char''' ) __lowerCamelCase , __lowerCamelCase = self._decode_helper(SCREAMING_SNAKE_CASE__ , '''bpe''' ) __lowerCamelCase , __lowerCamelCase = self._decode_helper(SCREAMING_SNAKE_CASE__ , '''wp''' ) __lowerCamelCase = [] __lowerCamelCase = [] for i in range(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]] __lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]] __lowerCamelCase = scores.index(max(SCREAMING_SNAKE_CASE__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __lowerCamelCase = {} __lowerCamelCase = final_strs __lowerCamelCase = final_scores __lowerCamelCase = char_strs __lowerCamelCase = bpe_strs __lowerCamelCase = wp_strs return out def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: if format == DecodeType.CHARACTER: __lowerCamelCase = self.char_decode __lowerCamelCase = 1 __lowerCamelCase = '''[s]''' elif format == DecodeType.BPE: __lowerCamelCase = self.bpe_decode __lowerCamelCase = 2 __lowerCamelCase = '''#''' elif format == DecodeType.WORDPIECE: __lowerCamelCase = self.wp_decode __lowerCamelCase = 1_02 __lowerCamelCase = '''[SEP]''' else: raise ValueError(f'''Format {format} is not supported.''' ) __lowerCamelCase , __lowerCamelCase = [], [] __lowerCamelCase = pred_logits.size(0 ) __lowerCamelCase = pred_logits.size(1 ) __lowerCamelCase , __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=SCREAMING_SNAKE_CASE__ , sorted=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = preds_index.view(-1 , SCREAMING_SNAKE_CASE__ )[:, 1:] __lowerCamelCase = decoder(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase = torch.nn.functional.softmax(SCREAMING_SNAKE_CASE__ , dim=2 ).max(dim=2 ) __lowerCamelCase = preds_max_prob[:, 1:] for index in range(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = preds_str[index].find(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = preds_str[index][:pred_eos] __lowerCamelCase = preds_index[index].cpu().tolist() __lowerCamelCase = pred_index.index(SCREAMING_SNAKE_CASE__ ) if eos_token in pred_index else -1 __lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1] __lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(SCREAMING_SNAKE_CASE__ ) conf_scores.append(SCREAMING_SNAKE_CASE__ ) return dec_strs, conf_scores def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )] return decode_strs def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: return self.bpe_tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str: __lowerCamelCase = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )] return decode_strs
339
from functools import lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> set: __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__lowerCAmelCase ) if n > 1: factors.add(__lowerCAmelCase ) return factors @lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> int: return len(unique_prime_factors(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : list ) -> bool: return len(set(__lowerCAmelCase ) ) in (0, 1) def __magic_name__ ( __lowerCAmelCase : int ) -> list: __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(__lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(__lowerCAmelCase ) for x in group] checker.append(__lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(__lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def __magic_name__ ( __lowerCAmelCase : int = 4 ) -> int: __lowerCamelCase = run(__lowerCAmelCase ) return results[0] if len(__lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
339
1
from __future__ import annotations SCREAMING_SNAKE_CASE__ : Dict = 8.988E9 # units = N * m^s * C^-2 def __magic_name__ ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> dict[str, float]: __lowerCamelCase = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __lowerCamelCase = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __lowerCamelCase = abs(__lowerCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __lowerCamelCase = abs(__lowerCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __lowerCamelCase = (COULOMBS_CONSTANT * charge_product / abs(__lowerCAmelCase )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
339
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : str = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} SCREAMING_SNAKE_CASE__ : int = "zero2" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "zero3" SCREAMING_SNAKE_CASE__ : Tuple = [ZEROa, ZEROa] def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Dict: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __lowerCamelCase = parameterized.to_safe_name('''_'''.join(str(__lowerCAmelCase ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test SCREAMING_SNAKE_CASE__ : int = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( __lowercase ): @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Dict: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Tuple: __lowerCamelCase = models[model] __lowerCamelCase = self.run_trainer( stage=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , eval_steps=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) self.do_checks(SCREAMING_SNAKE_CASE__ ) return output_dir def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Optional[Any]: __lowerCamelCase = self.get_auto_remove_tmp_dir('''./xxx''' , after=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = f''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(SCREAMING_SNAKE_CASE__ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __lowerCamelCase = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() __lowerCamelCase = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] __lowerCamelCase = self.get_launcher(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() ) return output_dir def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> str: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __lowerCamelCase = min(2 , get_gpu_count() ) if distributed else 1 return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
339
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
339
1
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py') def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=None ) -> str: require_version(deps[pkg] , __lowerCAmelCase )
339
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar("KEY") SCREAMING_SNAKE_CASE__ : Dict = TypeVar("VAL") @dataclass(frozen=__lowercase , slots=__lowercase ) class lowerCAmelCase__ ( Generic[KEY, VAL] ): a__ : KEY a__ : VAL class lowerCAmelCase__ ( _Item ): def __init__( self : str ) -> None: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __bool__( self : Tuple ) -> bool: return False SCREAMING_SNAKE_CASE__ : List[Any] = _DeletedItem() class lowerCAmelCase__ ( MutableMapping[KEY, VAL] ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ) -> None: __lowerCamelCase = initial_block_size __lowerCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowerCamelCase = capacity_factor __lowerCamelCase = 0 def __A ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ) -> int: return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int: return (ind + 1) % len(self._buckets ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> bool: __lowerCamelCase = self._buckets[ind] if not stored: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self._len += 1 return True elif stored.key == key: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return True else: return False def __A ( self : Any ) -> bool: __lowerCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __lowerCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCamelCase = self._buckets __lowerCamelCase = [None] * new_size __lowerCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __A ( self : str ) -> None: self._resize(len(self._buckets ) * 2 ) def __A ( self : Dict ) -> None: self._resize(len(self._buckets ) // 2 ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY ) -> Iterator[int]: __lowerCamelCase = self._get_bucket_index(SCREAMING_SNAKE_CASE__ ) for _ in range(len(self._buckets ) ): yield ind __lowerCamelCase = self._get_next_ind(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): break def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __delitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE__ ) if item is _deleted: continue if item.key == key: __lowerCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> VAL: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE__ ) def __len__( self : int ) -> int: return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ) -> str: __lowerCamelCase = ''' ,'''.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
339
1
SCREAMING_SNAKE_CASE__ : int = [0, 2, 4, 6, 8] SCREAMING_SNAKE_CASE__ : Dict = [1, 3, 5, 7, 9] def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> int: if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 __lowerCamelCase = 0 for digit in range(10 ): __lowerCamelCase = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , __lowerCAmelCase , __lowerCAmelCase ) return result __lowerCamelCase = 0 for digita in range(10 ): __lowerCamelCase = digita if (remainder + digita) % 2 == 0: __lowerCamelCase = ODD_DIGITS else: __lowerCamelCase = EVEN_DIGITS for digita in other_parity_digits: __lowerCamelCase = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , __lowerCAmelCase , __lowerCAmelCase , ) return result def __magic_name__ ( __lowerCAmelCase : int = 9 ) -> int: __lowerCamelCase = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__lowerCAmelCase , 0 , [0] * length , __lowerCAmelCase ) return result if __name__ == "__main__": print(F'{solution() = }')
339
from datetime import datetime as dt import os from github import Github SCREAMING_SNAKE_CASE__ : Any = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def __magic_name__ ( ) -> Any: __lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCamelCase = g.get_repo('''huggingface/transformers''' ) __lowerCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase ) __lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
339
1
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "allenai/longformer-base-4096": 4_096, "allenai/longformer-large-4096": 4_096, "allenai/longformer-large-4096-finetuned-triviaqa": 4_096, "allenai/longformer-base-4096-extra.pos.embd.only": 4_096, "allenai/longformer-large-4096-extra.pos.embd.only": 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __magic_name__ ( ) -> int: __lowerCamelCase = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __lowerCamelCase = bs[:] __lowerCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowerCAmelCase ) cs.append(2**8 + n ) n += 1 __lowerCamelCase = [chr(__lowerCAmelCase ) for n in cs] return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : str ) -> Dict: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char return pairs class lowerCAmelCase__ ( __lowercase ): a__ : int = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any]="replace" , SCREAMING_SNAKE_CASE__ : Dict="<s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE__ : int , ) -> Tuple: __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} __lowerCamelCase = errors # how to handle errors in decoding __lowerCamelCase = bytes_to_unicode() __lowerCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} __lowerCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def __A ( self : Any ) -> Optional[int]: return len(self.encoder ) def __A ( self : List[str] ) -> Any: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: if token in self.cache: return self.cache[token] __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: return token while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCamelCase = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word return word def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> Any: __lowerCamelCase = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) return bpe_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: return self.decoder.get(SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: __lowerCamelCase = ''''''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] __lowerCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict: __lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()): __lowerCamelCase = ''' ''' + text return (text, kwargs)
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=30 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=10 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , ) -> Tuple: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = num_patches + 1 def __A ( self : Optional[Any] ) -> Dict: __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) return config, pixel_values def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]: __lowerCamelCase = FlaxViTModel(config=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase = (self.image_size, self.image_size) __lowerCamelCase = (self.patch_size, self.patch_size) __lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: __lowerCamelCase = self.type_sequence_label_size __lowerCamelCase = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase = 1 __lowerCamelCase = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : List[Any] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def __A ( self : Optional[Any] ) -> None: __lowerCamelCase = FlaxViTModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __A ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __A ( self : int ) -> List[str]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): return model(pixel_values=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with self.subTest('''JIT Enabled''' ): __lowerCamelCase = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __lowerCamelCase = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __A ( self : Union[str, Any] ) -> Tuple: for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('''google/vit-base-patch16-224''' ) __lowerCamelCase = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
339
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class lowerCAmelCase__ ( __lowercase ): a__ : torch.FloatTensor class lowerCAmelCase__ ( __lowercase , __lowercase ): @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE__ : int = 16 , SCREAMING_SNAKE_CASE__ : int = 88 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "geglu" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ) -> List[str]: super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = in_channels __lowerCamelCase = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE__ , num_channels=SCREAMING_SNAKE_CASE__ , eps=1e-6 , affine=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # 3. Define transformers blocks __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , cross_attention_dim=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , attention_bias=SCREAMING_SNAKE_CASE__ , double_self_attention=SCREAMING_SNAKE_CASE__ , norm_elementwise_affine=SCREAMING_SNAKE_CASE__ , ) for d in range(SCREAMING_SNAKE_CASE__ ) ] ) __lowerCamelCase = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : bool = True , ) -> int: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = hidden_states.shape __lowerCamelCase = batch_frames // num_frames __lowerCamelCase = hidden_states __lowerCamelCase = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __lowerCamelCase = self.norm(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.proj_in(SCREAMING_SNAKE_CASE__ ) # 2. Blocks for block in self.transformer_blocks: __lowerCamelCase = block( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , cross_attention_kwargs=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ , ) # 3. Output __lowerCamelCase = self.proj_out(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ( hidden_states[None, None, :] .reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __lowerCamelCase = hidden_states.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE__ )
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None: if start is None: __lowerCamelCase = 0 if end is None: __lowerCamelCase = len(__lowerCAmelCase ) - 1 if start >= end: return __lowerCamelCase = (start + end) // 2 slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase ) if sequence[end] < sequence[mid]: __lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end] slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
339
1
def __magic_name__ ( __lowerCAmelCase : int ) -> bool: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError('''check_bouncy() accepts only integer arguments''' ) __lowerCamelCase = str(__lowerCAmelCase ) __lowerCamelCase = ''''''.join(sorted(__lowerCAmelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def __magic_name__ ( __lowerCAmelCase : float = 99 ) -> int: if not 0 < percent < 100: raise ValueError('''solution() only accepts values from 0 to 100''' ) __lowerCamelCase = 0 __lowerCamelCase = 1 while True: if check_bouncy(__lowerCAmelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(F'{solution(99)}')
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
339
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Dict = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right SCREAMING_SNAKE_CASE__ : Tuple = 250_004 SCREAMING_SNAKE_CASE__ : str = 250_020 @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Any = MBartaaTokenizer a__ : Any = MBartaaTokenizerFast a__ : Union[str, Any] = True a__ : str = True def __A ( self : str ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = MBartaaTokenizer(SCREAMING_SNAKE_CASE__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = '''<s>''' __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> Optional[int]: __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_54 ) def __A ( self : int ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def __A ( self : Optional[Any] ) -> Optional[int]: __lowerCamelCase = MBartaaTokenizer(SCREAMING_SNAKE_CASE__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def __A ( self : Tuple ) -> Dict: # fmt: off __lowerCamelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def __A ( self : Tuple ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) # Save tokenizer rust, legacy_format=True __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) # Save tokenizer rust, legacy_format=False __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): a__ : Optional[int] = """facebook/mbart-large-50-one-to-many-mmt""" a__ : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] a__ : Optional[int] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] a__ : List[Any] = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def __A ( cls : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __lowerCamelCase = 1 return cls def __A ( self : Optional[int] ) -> str: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def __A ( self : Any ) -> Any: __lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids ) __lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __lowerCamelCase = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Tuple: __lowerCamelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 10 __lowerCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0] self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] ) -> Optional[Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = MBartaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ ) @require_torch def __A ( self : Dict ) -> Any: __lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) __lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __A ( self : Any ) -> int: __lowerCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __lowerCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __A ( self : Union[str, Any] ) -> Optional[int]: __lowerCamelCase = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors='''pt''' ) __lowerCamelCase = self.tokenizer( text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors='''pt''' ) __lowerCamelCase = targets['''input_ids'''] __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __A ( self : Any ) -> List[Any]: __lowerCamelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
339
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from functools import reduce SCREAMING_SNAKE_CASE__ : int = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def __magic_name__ ( __lowerCAmelCase : str = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda __lowerCAmelCase , __lowerCAmelCase : str(int(__lowerCAmelCase ) * int(__lowerCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(__lowerCAmelCase ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
339
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
339
1
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Dict = TypeVar("T") def __magic_name__ ( __lowerCAmelCase : int ) -> int: return (position - 1) // 2 def __magic_name__ ( __lowerCAmelCase : int ) -> int: return (2 * position) + 1 def __magic_name__ ( __lowerCAmelCase : int ) -> int: return (2 * position) + 2 class lowerCAmelCase__ ( Generic[T] ): def __init__( self : Optional[Any] ) -> None: __lowerCamelCase = [] __lowerCamelCase = {} __lowerCamelCase = 0 def __len__( self : Tuple ) -> int: return self.elements def __repr__( self : str ) -> str: return str(self.heap ) def __A ( self : Optional[Any] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __lowerCamelCase = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __lowerCamelCase , __lowerCamelCase = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __lowerCamelCase , __lowerCamelCase = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE__ ) return elem def __A ( self : int , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None: # Update the weight of the given key __lowerCamelCase = self.position_map[elem] __lowerCamelCase = (elem, weight) if position > 0: __lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __lowerCamelCase = self.position_map[elem] if curr_pos == 0: return None __lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase = self.heap[curr_pos] __lowerCamelCase , __lowerCamelCase = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_up(SCREAMING_SNAKE_CASE__ ) return None def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __lowerCamelCase = self.position_map[elem] __lowerCamelCase , __lowerCamelCase = self.heap[curr_pos] __lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements and child_right_position < self.elements: __lowerCamelCase , __lowerCamelCase = self.heap[child_left_position] __lowerCamelCase , __lowerCamelCase = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements: __lowerCamelCase , __lowerCamelCase = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: return None if child_right_position < self.elements: __lowerCamelCase , __lowerCamelCase = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) return None def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None: # Swap the nodes at the given positions __lowerCamelCase = self.heap[nodea_pos][0] __lowerCamelCase = self.heap[nodea_pos][0] __lowerCamelCase , __lowerCamelCase = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __lowerCamelCase = nodea_pos __lowerCamelCase = nodea_pos class lowerCAmelCase__ ( Generic[T] ): def __init__( self : Tuple ) -> None: __lowerCamelCase = {} __lowerCamelCase = 0 def __repr__( self : int ) -> str: return str(self.connections ) def __len__( self : int ) -> int: return self.nodes def __A ( self : Any , SCREAMING_SNAKE_CASE__ : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __lowerCamelCase = {} self.nodes += 1 def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(SCREAMING_SNAKE_CASE__ ) self.add_node(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = weight __lowerCamelCase = weight def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: __lowerCamelCase = {node: maxsize for node in graph.connections} __lowerCamelCase = {node: None for node in graph.connections} __lowerCamelCase = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__lowerCAmelCase , __lowerCAmelCase ) if priority_queue.is_empty(): return dist, parent # initialization __lowerCamelCase = priority_queue.extract_min() __lowerCamelCase = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCamelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowerCAmelCase , dist[neighbour] ) __lowerCamelCase = node # running prim's algorithm while not priority_queue.is_empty(): __lowerCamelCase = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCamelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowerCAmelCase , dist[neighbour] ) __lowerCamelCase = node return dist, parent
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
1
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase__ : @staticmethod def __A ( *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase__ ( unittest.TestCase ): a__ : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: __lowerCamelCase = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: __lowerCamelCase = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE__ , { '''score''': ANY(SCREAMING_SNAKE_CASE__ ), '''label''': ANY(SCREAMING_SNAKE_CASE__ ), '''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE__ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE__ )}, } , ) import datasets __lowerCamelCase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) __lowerCamelCase = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] __lowerCamelCase = object_detector(SCREAMING_SNAKE_CASE__ , threshold=0.0 ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for outputs in batch_outputs: self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE__ , { '''score''': ANY(SCREAMING_SNAKE_CASE__ ), '''label''': ANY(SCREAMING_SNAKE_CASE__ ), '''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE__ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE__ )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def __A ( self : str ) -> List[Any]: pass @require_torch def __A ( self : List[Any] ) -> str: __lowerCamelCase = '''hf-internal-testing/tiny-detr-mobilenetsv3''' __lowerCamelCase = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ] , ) __lowerCamelCase = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}}, ], ] , ) @require_torch @slow def __A ( self : List[str] ) -> Tuple: __lowerCamelCase = '''facebook/detr-resnet-50''' __lowerCamelCase = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) __lowerCamelCase = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], ] , ) @require_torch @slow def __A ( self : Dict ) -> List[str]: __lowerCamelCase = '''facebook/detr-resnet-50''' __lowerCamelCase = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) __lowerCamelCase = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ], ] , ) @require_torch @slow def __A ( self : Union[str, Any] ) -> Optional[Any]: __lowerCamelCase = 0.9985 __lowerCamelCase = '''facebook/detr-resnet-50''' __lowerCamelCase = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}}, ] , ) @require_torch @require_pytesseract @slow def __A ( self : Dict ) -> str: __lowerCamelCase = '''Narsil/layoutlmv3-finetuned-funsd''' __lowerCamelCase = 0.9993 __lowerCamelCase = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE__ , threshold=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = object_detector( '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}}, ] , )
339
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
339
1
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE__ : List[Any] = random.Random() def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=1.0 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ) -> Tuple: if rng is None: __lowerCamelCase = global_rng __lowerCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_00 , SCREAMING_SNAKE_CASE__ : int=20_00 , SCREAMING_SNAKE_CASE__ : Any=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=1_28 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : List[str]=30 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4_41_00 , ) -> List[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = min_seq_length __lowerCamelCase = max_seq_length __lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCamelCase = spectrogram_length __lowerCamelCase = feature_size __lowerCamelCase = num_audio_channels __lowerCamelCase = hop_length __lowerCamelCase = chunk_length __lowerCamelCase = sampling_rate def __A ( self : Union[str, Any] ) -> Tuple: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=False ) -> int: def _flatten(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return list(itertools.chain(*SCREAMING_SNAKE_CASE__ ) ) if equal_length: __lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCamelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCamelCase = [np.asarray(SCREAMING_SNAKE_CASE__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : List[Any] = TvltFeatureExtractor def __A ( self : Dict ) -> List[Any]: __lowerCamelCase = TvltFeatureExtractionTester(self ) def __A ( self : Optional[int] ) -> Optional[int]: __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''spectrogram_length''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''feature_size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''num_audio_channels''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''hop_length''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''chunk_length''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''sampling_rate''' ) ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE__ )[0] check_json_file_has_correct_format(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = feat_extract_first.to_dict() __lowerCamelCase = feat_extract_second.to_dict() __lowerCamelCase = dict_first.pop('''mel_filters''' ) __lowerCamelCase = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> Tuple: __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = feat_extract_first.to_dict() __lowerCamelCase = feat_extract_second.to_dict() __lowerCamelCase = dict_first.pop('''mel_filters''' ) __lowerCamelCase = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] ) -> int: # Initialize feature_extractor __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = [np.asarray(SCREAMING_SNAKE_CASE__ ) for speech_input in speech_inputs] # Test not batched input __lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched __lowerCamelCase = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking __lowerCamelCase = feature_extractor( SCREAMING_SNAKE_CASE__ , return_tensors='''np''' , sampling_rate=4_41_00 , mask_audio=SCREAMING_SNAKE_CASE__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. __lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCamelCase = np.asarray(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> Tuple: __lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech __lowerCamelCase = ds.sort('''id''' ).select(range(SCREAMING_SNAKE_CASE__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __A ( self : int ) -> Union[str, Any]: __lowerCamelCase = self._load_datasamples(1 ) __lowerCamelCase = TvltFeatureExtractor() __lowerCamelCase = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) __lowerCamelCase = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool: return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : List[str] = KandinskyVaaInpaintPipeline a__ : List[Any] = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] a__ : Optional[Any] = [ """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] a__ : Tuple = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a__ : Union[str, Any] = False @property def __A ( self : List[str] ) -> Dict: return 32 @property def __A ( self : Dict ) -> Union[str, Any]: return 32 @property def __A ( self : List[str] ) -> List[Any]: return self.time_input_dim @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: return self.time_input_dim * 4 @property def __A ( self : Union[str, Any] ) -> List[str]: return 1_00 @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __lowerCamelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Any ) -> Optional[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self : Tuple ) -> str: torch.manual_seed(0 ) __lowerCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self : Tuple ) -> int: __lowerCamelCase = self.dummy_unet __lowerCamelCase = self.dummy_movq __lowerCamelCase = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=0 ) -> int: __lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( SCREAMING_SNAKE_CASE__ ) # create init_image __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create mask __lowerCamelCase = np.ones((64, 64) , dtype=np.floataa ) __lowerCamelCase = 0 if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __A ( self : int ) -> Tuple: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images __lowerCamelCase = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def __A ( self : List[str] ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Dict ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : int ) -> List[Any]: __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' ) __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) __lowerCamelCase = np.ones((7_68, 7_68) , dtype=np.floataa ) __lowerCamelCase = 0 __lowerCamelCase = '''a hat''' __lowerCamelCase = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = KandinskyVaaInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa ) __lowerCamelCase = pipeline.to(SCREAMING_SNAKE_CASE__ ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase , __lowerCamelCase = pipe_prior( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __lowerCamelCase = pipeline( image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
1
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. __lowerCamelCase , __lowerCamelCase = y, x % y return abs(__lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: try: __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __lowerCamelCase = int(nums[0] ) __lowerCamelCase = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
339
1
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __lowerCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) sd_pipe.set_scheduler('''sample_euler''' ) __lowerCamelCase = '''A painting of a squirrel eating a burger''' __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __lowerCamelCase = output.images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : int ) -> Tuple: __lowerCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __lowerCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) sd_pipe.set_scheduler('''sample_euler''' ) __lowerCamelCase = '''A painting of a squirrel eating a burger''' __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) __lowerCamelCase = output.images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __A ( self : str ) -> str: __lowerCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __lowerCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) __lowerCamelCase = '''A painting of a squirrel eating a burger''' __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = output.images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
1
from ..utils import DummyObject, requires_backends class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Tuple: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : str = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Dict = ["""sentencepiece"""] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : int ) -> str: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Tuple = ["""sentencepiece"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Any = ["""sentencepiece"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Any = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Dict = ["""sentencepiece"""] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : int = ["""sentencepiece"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Dict = ["""sentencepiece"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[str] ) -> int: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : int = ["""sentencepiece"""] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : int ) -> Any: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[int] = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : str = ["""sentencepiece"""] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : int = ["""sentencepiece"""] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> str: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : List[Any] = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[Any] = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Dict: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[int] = ["""sentencepiece"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Tuple: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Tuple = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[int] = ["""sentencepiece"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Tuple = ["""sentencepiece"""] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : str ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : List[str] = ["""sentencepiece"""] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[int] = ["""sentencepiece"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Any: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Any = ["""sentencepiece"""] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ) -> Any: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : Optional[Any] = ["""sentencepiece"""] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : str = ["""sentencepiece"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> List[str]: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : str = ["""sentencepiece"""] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Any: requires_backends(self , ['''sentencepiece'''] ) class lowerCAmelCase__ ( metaclass=__lowercase ): a__ : int = ["""sentencepiece"""] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: requires_backends(self , ['''sentencepiece'''] )
339
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") SCREAMING_SNAKE_CASE__ : str = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a__ : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a__ : int = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : bool = field( default=__lowercase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a__ : bool = field( default=__lowercase , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """A csv or a json file containing the training data."""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """A csv or a json file containing the test data."""} ) def __A ( self : Tuple ) -> Optional[int]: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' ) else: __lowerCamelCase = self.train_file.split('''.''' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCamelCase = self.validation_file.split('''.''' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class lowerCAmelCase__ : a__ : str = field( default=__lowercase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field( default=__lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a__ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a__ : bool = field( default=__lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __magic_name__ ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCamelCase = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) datasets.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __lowerCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCamelCase = {'''train''': data_args.train_file, '''validation''': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCamelCase = data_args.train_file.split('''.''' )[-1] __lowerCamelCase = data_args.test_file.split('''.''' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCamelCase = data_args.test_file else: raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith('''.csv''' ): # Loading a dataset from local csv files __lowerCamelCase = load_dataset('''csv''' , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCamelCase = load_dataset('''json''' , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCamelCase = raw_datasets['''train'''].features['''label'''].names __lowerCamelCase = len(__lowerCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCamelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCAmelCase , ) __lowerCamelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCamelCase = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCamelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCamelCase = {'''Refused''': 0, '''Entailed''': 1} __lowerCamelCase = {0: '''Refused''', 1: '''Entailed'''} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCamelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__lowerCAmelCase : Optional[int] ): # Tokenize the texts def _convert_table_text_to_pandas(__lowerCAmelCase : Optional[Any] ): __lowerCamelCase = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )] __lowerCamelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCamelCase = examples['''statement'''] __lowerCamelCase = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) ) __lowerCamelCase = tokenizer(__lowerCAmelCase , __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ) __lowerCamelCase = examples['''label'''] return result with training_args.main_process_first(desc='''dataset map pre-processing''' ): __lowerCamelCase = raw_datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) __lowerCamelCase = raw_datasets['''train'''] if data_args.max_train_samples is not None: __lowerCamelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) __lowerCamelCase = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: __lowerCamelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('''--do_predict requires a test dataset''' ) __lowerCamelCase = raw_datasets['''test'''] if data_args.max_predict_samples is not None: __lowerCamelCase = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__lowerCAmelCase ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCAmelCase : EvalPrediction ): __lowerCamelCase = p.predictions[0] if isinstance(p.predictions , __lowerCAmelCase ) else p.predictions __lowerCamelCase = np.argmax(__lowerCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCamelCase = default_data_collator elif training_args.fpaa: __lowerCamelCase = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) else: __lowerCamelCase = None # Initialize our Trainer __lowerCamelCase = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: __lowerCamelCase = None if training_args.resume_from_checkpoint is not None: __lowerCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCamelCase = last_checkpoint __lowerCamelCase = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) __lowerCamelCase = train_result.metrics __lowerCamelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase ) ) __lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''' , __lowerCAmelCase ) trainer.save_metrics('''train''' , __lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(eval_dataset=__lowerCAmelCase ) __lowerCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCAmelCase ) __lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics('''eval''' , __lowerCAmelCase ) trainer.save_metrics('''eval''' , __lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCamelCase = predict_dataset.remove_columns('''label''' ) __lowerCamelCase = trainer.predict(__lowerCAmelCase , metric_key_prefix='''predict''' ).predictions __lowerCamelCase = np.argmax(__lowerCAmelCase , axis=1 ) __lowerCamelCase = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , '''w''' ) as writer: logger.info('''***** Predict Results *****''' ) writer.write('''index\tprediction\n''' ) for index, item in enumerate(__lowerCAmelCase ): __lowerCamelCase = label_list[item] writer.write(f'''{index}\t{item}\n''' ) __lowerCamelCase = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''} if training_args.push_to_hub: trainer.push_to_hub(**__lowerCAmelCase ) else: trainer.create_model_card(**__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : str ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
339
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } SCREAMING_SNAKE_CASE__ : Dict = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def __magic_name__ ( __lowerCAmelCase : Any ) -> str: __lowerCamelCase = {} with open(__lowerCAmelCase , '''r''' ) as file: for line_number, line in enumerate(__lowerCAmelCase ): __lowerCamelCase = line.strip() if line: __lowerCamelCase = line.split() __lowerCamelCase = line_number __lowerCamelCase = words[0] __lowerCamelCase = value return result def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> int: for attribute in key.split('''.''' ): __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__lowerCAmelCase ): __lowerCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCamelCase = '''param''' if weight_type is not None and weight_type != "param": __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape elif weight_type is not None and weight_type == "param": __lowerCamelCase = hf_pointer for attribute in hf_param_name.split('''.''' ): __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = shape_pointer.shape # let's reduce dimension __lowerCamelCase = value[0] else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = value else: __lowerCamelCase = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> List[str]: __lowerCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__lowerCAmelCase ): __lowerCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCamelCase = '''param''' if weight_type is not None and weight_type != "param": __lowerCamelCase = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCamelCase = '''.'''.join([key, hf_param_name] ) else: __lowerCamelCase = key __lowerCamelCase = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE__ : List[str] = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[Any]=None ) -> int: __lowerCamelCase = False for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(__lowerCAmelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' , __lowerCAmelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None if hf_dict is not None: rename_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return is_used return is_used def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> int: __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCamelCase = True else: __lowerCamelCase = load_wavaveca_layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> int: __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : int=True , __lowerCAmelCase : Any=False ) -> List[str]: if config_path is not None: __lowerCamelCase = WavaVecaConfig.from_pretrained(__lowerCAmelCase ) else: __lowerCamelCase = WavaVecaConfig() if is_seq_class: __lowerCamelCase = read_txt_into_dict(__lowerCAmelCase ) __lowerCamelCase = idalabel __lowerCamelCase = WavaVecaForSequenceClassification(__lowerCAmelCase ) __lowerCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ) feature_extractor.save_pretrained(__lowerCAmelCase ) elif is_finetuned: if dict_path: __lowerCamelCase = Dictionary.load(__lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCamelCase = target_dict.pad_index __lowerCamelCase = target_dict.bos_index __lowerCamelCase = target_dict.eos_index __lowerCamelCase = len(target_dict.symbols ) __lowerCamelCase = os.path.join(__lowerCAmelCase , '''vocab.json''' ) if not os.path.isdir(__lowerCAmelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCAmelCase ) ) return os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) __lowerCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCamelCase = 0 __lowerCamelCase = 1 with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = WavaVecaCTCTokenizer( __lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCAmelCase , ) __lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False __lowerCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ) __lowerCamelCase = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) __lowerCamelCase = WavaVecaForCTC(__lowerCAmelCase ) else: __lowerCamelCase = WavaVecaForPreTraining(__lowerCAmelCase ) if is_finetuned or is_seq_class: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' ) __lowerCamelCase = fairseq.tasks.setup_task(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCAmelCase ) __lowerCamelCase = model[0].eval() recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Union[str, Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
339
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
1
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
1
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys SCREAMING_SNAKE_CASE__ : List[Any] = "3" print("Python version:", sys.version) print("OS platform:", platform.platform()) print("OS architecture:", platform.machine()) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) except ImportError: print("Torch version:", None) try: import transformers print("transformers version:", transformers.__version__) except ImportError: print("transformers version:", None)
339
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Tuple = { "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : int = [ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
from functools import lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> set: __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__lowerCAmelCase ) if n > 1: factors.add(__lowerCAmelCase ) return factors @lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> int: return len(unique_prime_factors(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : list ) -> bool: return len(set(__lowerCAmelCase ) ) in (0, 1) def __magic_name__ ( __lowerCAmelCase : int ) -> list: __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(__lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(__lowerCAmelCase ) for x in group] checker.append(__lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(__lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def __magic_name__ ( __lowerCAmelCase : int = 4 ) -> int: __lowerCamelCase = run(__lowerCAmelCase ) return results[0] if len(__lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
339
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
339
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
import requests from bsa import BeautifulSoup def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str: __lowerCamelCase = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , '''html.parser''' ) __lowerCamelCase = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) __lowerCamelCase = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2_018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
339
1
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class lowerCAmelCase__ ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Callable , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : str = None , ) -> str: super().__init__() __lowerCamelCase = initial_learning_rate __lowerCamelCase = warmup_steps __lowerCamelCase = power __lowerCamelCase = decay_schedule_fn __lowerCamelCase = name def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase = tf.cast(SCREAMING_SNAKE_CASE__ , tf.floataa ) __lowerCamelCase = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase = global_step_float / warmup_steps_float __lowerCamelCase = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE__ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Union[str, Any] ) -> Dict: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __magic_name__ ( __lowerCAmelCase : float , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : float = 0.9 , __lowerCAmelCase : float = 0.999 , __lowerCAmelCase : float = 1E-8 , __lowerCAmelCase : Optional[float] = None , __lowerCAmelCase : Optional[float] = None , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : Optional[List[str]] = None , ) -> Tuple: __lowerCamelCase = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , ) if num_warmup_steps: __lowerCamelCase = WarmUp( initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , ) if weight_decay_rate > 0.0: __lowerCamelCase = AdamWeightDecay( learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=__lowerCAmelCase , ) else: __lowerCamelCase = tf.keras.optimizers.Adam( learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class lowerCAmelCase__ ( __lowercase ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , SCREAMING_SNAKE_CASE__ : float = 0.9 , SCREAMING_SNAKE_CASE__ : float = 0.999 , SCREAMING_SNAKE_CASE__ : float = 1e-7 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = weight_decay_rate __lowerCamelCase = include_in_weight_decay __lowerCamelCase = exclude_from_weight_decay @classmethod def __A ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: __lowerCamelCase = {'''WarmUp''': WarmUp} return super(SCREAMING_SNAKE_CASE__ , cls ).from_config(SCREAMING_SNAKE_CASE__ , custom_objects=SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]: super(SCREAMING_SNAKE_CASE__ , self )._prepare_local(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : Dict ) -> str: __lowerCamelCase , __lowerCamelCase = list(zip(*SCREAMING_SNAKE_CASE__ ) ) return super(SCREAMING_SNAKE_CASE__ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , name=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase = apply_state or {} __lowerCamelCase = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase = self._fallback_apply_state(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self._decay_weights_op(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE__ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self._decay_weights_op(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE__ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: __lowerCamelCase = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) is not None: return False return True class lowerCAmelCase__ ( __lowercase ): def __init__( self : Dict ) -> str: __lowerCamelCase = [] __lowerCamelCase = None @property def __A ( self : Any ) -> List[str]: if self._accum_steps is None: __lowerCamelCase = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def __A ( self : List[str] ) -> Tuple: if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> str: if not self._gradients: __lowerCamelCase = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(SCREAMING_SNAKE_CASE__ ) , trainable=SCREAMING_SNAKE_CASE__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(SCREAMING_SNAKE_CASE__ ) != len(self._gradients ): raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE__ )}''' ) for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(SCREAMING_SNAKE_CASE__ ) self._accum_steps.assign_add(1 ) def __A ( self : Optional[int] ) -> Optional[Any]: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE__ ) )
339
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar("KEY") SCREAMING_SNAKE_CASE__ : Dict = TypeVar("VAL") @dataclass(frozen=__lowercase , slots=__lowercase ) class lowerCAmelCase__ ( Generic[KEY, VAL] ): a__ : KEY a__ : VAL class lowerCAmelCase__ ( _Item ): def __init__( self : str ) -> None: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __bool__( self : Tuple ) -> bool: return False SCREAMING_SNAKE_CASE__ : List[Any] = _DeletedItem() class lowerCAmelCase__ ( MutableMapping[KEY, VAL] ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ) -> None: __lowerCamelCase = initial_block_size __lowerCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowerCamelCase = capacity_factor __lowerCamelCase = 0 def __A ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ) -> int: return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int: return (ind + 1) % len(self._buckets ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> bool: __lowerCamelCase = self._buckets[ind] if not stored: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self._len += 1 return True elif stored.key == key: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return True else: return False def __A ( self : Any ) -> bool: __lowerCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __lowerCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCamelCase = self._buckets __lowerCamelCase = [None] * new_size __lowerCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __A ( self : str ) -> None: self._resize(len(self._buckets ) * 2 ) def __A ( self : Dict ) -> None: self._resize(len(self._buckets ) // 2 ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY ) -> Iterator[int]: __lowerCamelCase = self._get_bucket_index(SCREAMING_SNAKE_CASE__ ) for _ in range(len(self._buckets ) ): yield ind __lowerCamelCase = self._get_next_ind(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): break def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __delitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE__ ) if item is _deleted: continue if item.key == key: __lowerCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> VAL: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE__ ) def __len__( self : int ) -> int: return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ) -> str: __lowerCamelCase = ''' ,'''.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
339
1
from collections import defaultdict from math import ceil, sqrt def __magic_name__ ( __lowerCAmelCase : int = 100_0000 , __lowerCAmelCase : int = 10 ) -> int: __lowerCamelCase = defaultdict(__lowerCAmelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __lowerCamelCase = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __lowerCamelCase = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__lowerCAmelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F'{solution() = }')
339
from datetime import datetime as dt import os from github import Github SCREAMING_SNAKE_CASE__ : Any = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def __magic_name__ ( ) -> Any: __lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCamelCase = g.get_repo('''huggingface/transformers''' ) __lowerCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase ) __lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
339
1
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCAmelCase__ ( nn.Module ): a__ : int a__ : int a__ : float = 0.0 a__ : int = 1 a__ : int = 1 a__ : bool = True a__ : bool = False a__ : bool = False a__ : bool = False a__ : jnp.dtype = jnp.floataa def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=True ) -> Optional[int]: __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase__ ( nn.Module ): a__ : int a__ : int a__ : float = 0.0 a__ : int = 1 a__ : bool = True a__ : jnp.dtype = jnp.floataa def __A ( self : Optional[Any] ) -> List[Any]: __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> Union[str, Any]: __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase__ ( nn.Module ): a__ : int a__ : int a__ : int a__ : float = 0.0 a__ : int = 1 a__ : int = 1 a__ : bool = True a__ : bool = False a__ : bool = False a__ : bool = False a__ : jnp.dtype = jnp.floataa def __A ( self : str ) -> Dict: __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=True ) -> List[str]: for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): a__ : int a__ : int a__ : int a__ : float = 0.0 a__ : int = 1 a__ : bool = True a__ : jnp.dtype = jnp.floataa def __A ( self : Tuple ) -> int: __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=True ) -> Dict: for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): a__ : int a__ : float = 0.0 a__ : int = 1 a__ : int = 1 a__ : bool = False a__ : bool = False a__ : jnp.dtype = jnp.floataa def __A ( self : List[str] ) -> str: # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=True ) -> Any: __lowerCamelCase = self.resnets[0](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) return hidden_states
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__) torch.set_grad_enabled(False) SCREAMING_SNAKE_CASE__ : int = "cuda" if torch.cuda.is_available() else "cpu" def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple=100 , __lowerCAmelCase : Optional[int]=" " ) -> List[str]: __lowerCamelCase = text.split(__lowerCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] def __magic_name__ ( __lowerCAmelCase : dict ) -> dict: __lowerCamelCase , __lowerCamelCase = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__lowerCAmelCase ): titles.append(title if title is not None else '''''' ) texts.append(__lowerCAmelCase ) return {"title": titles, "text": texts} def __magic_name__ ( __lowerCAmelCase : dict , __lowerCAmelCase : DPRContextEncoder , __lowerCAmelCase : DPRContextEncoderTokenizerFast ) -> dict: __lowerCamelCase = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__lowerCAmelCase , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] __lowerCamelCase = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __magic_name__ ( __lowerCAmelCase : "RagExampleArguments" , __lowerCAmelCase : "ProcessingArguments" , __lowerCAmelCase : "IndexHnswArguments" , ) -> List[str]: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __lowerCamelCase = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __lowerCamelCase = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings __lowerCamelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase ) __lowerCamelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __lowerCamelCase = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space __lowerCamelCase = dataset.map( partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , ) # And finally save your dataset __lowerCamelCase = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__lowerCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __lowerCamelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__lowerCAmelCase ) # And save the index __lowerCamelCase = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__lowerCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCAmelCase__ : a__ : str = field( default=str(Path(__lowercase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , ) a__ : str = field( default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , ) a__ : str = field( default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={ """help""": ( """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or""" """ 'facebook/dpr-ctx_encoder-multiset-base'""" ) } , ) a__ : Optional[str] = field( default=str(Path(__lowercase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , ) @dataclass class lowerCAmelCase__ : a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": """The number of processes to use to split the documents into passages. Default is single process.""" } , ) a__ : int = field( default=16 , metadata={ """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder.""" } , ) @dataclass class lowerCAmelCase__ : a__ : int = field( default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , ) a__ : int = field( default=128 , metadata={ """help""": ( """The number of bi-directional links created for every new element during the HNSW index construction.""" ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Union[str, Any] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
339
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE__ : List[Any] = { "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any = [ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None: if start is None: __lowerCamelCase = 0 if end is None: __lowerCamelCase = len(__lowerCAmelCase ) - 1 if start >= end: return __lowerCamelCase = (start + end) // 2 slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase ) if sequence[end] < sequence[mid]: __lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end] slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
339
1
def __magic_name__ ( __lowerCAmelCase : int ) -> bool: return str(__lowerCAmelCase ) == str(__lowerCAmelCase )[::-1] def __magic_name__ ( __lowerCAmelCase : int ) -> int: return int(__lowerCAmelCase ) + int(str(__lowerCAmelCase )[::-1] ) def __magic_name__ ( __lowerCAmelCase : int = 1_0000 ) -> int: __lowerCamelCase = [] for num in range(1 , __lowerCAmelCase ): __lowerCamelCase = 0 __lowerCamelCase = num while iterations < 50: __lowerCamelCase = sum_reverse(__lowerCAmelCase ) iterations += 1 if is_palindrome(__lowerCAmelCase ): break else: lychrel_nums.append(__lowerCAmelCase ) return len(__lowerCAmelCase ) if __name__ == "__main__": print(F'{solution() = }')
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
339
1
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : List[Any] = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
339
1
from math import loga def __magic_name__ ( __lowerCAmelCase : int ) -> int: if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError('''Input value must be a \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = """deta""" a__ : int = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=9_00 , SCREAMING_SNAKE_CASE__ : Any=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=6 , SCREAMING_SNAKE_CASE__ : int=10_24 , SCREAMING_SNAKE_CASE__ : Any=8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[int]="relu" , SCREAMING_SNAKE_CASE__ : int=2_56 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1.0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Tuple="sine" , SCREAMING_SNAKE_CASE__ : Dict=5 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[str]=3_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.25 , **SCREAMING_SNAKE_CASE__ : int , ) -> int: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __lowerCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = backbone_config.pop('''model_type''' ) __lowerCamelCase = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = backbone_config __lowerCamelCase = num_queries __lowerCamelCase = max_position_embeddings __lowerCamelCase = d_model __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = init_xavier_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = auxiliary_loss __lowerCamelCase = position_embedding_type # deformable attributes __lowerCamelCase = num_feature_levels __lowerCamelCase = encoder_n_points __lowerCamelCase = decoder_n_points __lowerCamelCase = two_stage __lowerCamelCase = two_stage_num_proposals __lowerCamelCase = with_box_refine __lowerCamelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher __lowerCamelCase = class_cost __lowerCamelCase = bbox_cost __lowerCamelCase = giou_cost # Loss coefficients __lowerCamelCase = mask_loss_coefficient __lowerCamelCase = dice_loss_coefficient __lowerCamelCase = bbox_loss_coefficient __lowerCamelCase = giou_loss_coefficient __lowerCamelCase = eos_coefficient __lowerCamelCase = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __A ( self : str ) -> int: return self.encoder_attention_heads @property def __A ( self : List[Any] ) -> int: return self.d_model def __A ( self : Optional[Any] ) -> Dict: __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.backbone_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
339
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
339
1
from graphs.minimum_spanning_tree_kruskal import kruskal def __magic_name__ ( ) -> Union[str, Any]: __lowerCamelCase = 9 __lowerCamelCase = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __lowerCamelCase = kruskal(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(__lowerCAmelCase ) == sorted(__lowerCAmelCase )
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool: return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
1
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder SCREAMING_SNAKE_CASE__ : int = "base_with_context" def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase = weights[f'''layers_{lyr_num}'''] __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = ly_weight['''attention'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> Tuple: __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase = weights[f'''layers_{lyr_num}'''] __lowerCamelCase = ly_weight['''attention'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCAmelCase ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __lowerCamelCase = weights[f'''layers_{lyr_num}'''] __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) __lowerCamelCase = ly_weight['''self_attention'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = ly_weight['''MultiHeadDotProductAttention_0'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def __magic_name__ ( __lowerCAmelCase : Tuple ) -> int: __lowerCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __lowerCamelCase = jnp.tree_util.tree_map(onp.array , __lowerCAmelCase ) __lowerCamelCase = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] __lowerCamelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) __lowerCamelCase = inference.parse_training_gin_file(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = inference.InferenceModel(args.checkpoint_path , __lowerCAmelCase ) __lowerCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) __lowerCamelCase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __lowerCamelCase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __lowerCamelCase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __lowerCamelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __lowerCAmelCase ) __lowerCamelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __lowerCAmelCase ) __lowerCamelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , __lowerCAmelCase ) __lowerCamelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) __lowerCamelCase = SpectrogramDiffusionPipeline( notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F'{MODEL}/checkpoint_500000', type=str, required=False, help="Path to the original jax model checkpoint.", ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() main(args)
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. __lowerCamelCase , __lowerCamelCase = y, x % y return abs(__lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: try: __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __lowerCamelCase = int(nums[0] ) __lowerCamelCase = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
339
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available SCREAMING_SNAKE_CASE__ : str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["MLukeTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=0.999 , __lowerCAmelCase : int="cosine" , ) -> int: if alpha_transform_type == "cosine": def alpha_bar_fn(__lowerCAmelCase : int ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__lowerCAmelCase : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __lowerCamelCase = [] for i in range(__lowerCAmelCase ): __lowerCamelCase = i / num_diffusion_timesteps __lowerCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) ) return torch.tensor(__lowerCAmelCase , dtype=torch.floataa ) class lowerCAmelCase__ ( __lowercase , __lowercase ): a__ : Any = [e.name for e in KarrasDiffusionSchedulers] a__ : Tuple = 2 @register_to_config def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int = 10_00 , SCREAMING_SNAKE_CASE__ : float = 0.00085 , SCREAMING_SNAKE_CASE__ : float = 0.012 , SCREAMING_SNAKE_CASE__ : str = "linear" , SCREAMING_SNAKE_CASE__ : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE__ : str = "epsilon" , SCREAMING_SNAKE_CASE__ : Optional[bool] = False , SCREAMING_SNAKE_CASE__ : Optional[bool] = False , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : str = "linspace" , SCREAMING_SNAKE_CASE__ : int = 0 , ) -> List[str]: if trained_betas is not None: __lowerCamelCase = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) elif beta_schedule == "linear": __lowerCamelCase = torch.linspace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowerCamelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowerCamelCase = betas_for_alpha_bar(SCREAMING_SNAKE_CASE__ , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": __lowerCamelCase = betas_for_alpha_bar(SCREAMING_SNAKE_CASE__ , alpha_transform_type='''exp''' ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) __lowerCamelCase = 1.0 - self.betas __lowerCamelCase = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = use_karras_sigmas def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int=None ) -> Optional[int]: if schedule_timesteps is None: __lowerCamelCase = self.timesteps __lowerCamelCase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: __lowerCamelCase = 1 if len(SCREAMING_SNAKE_CASE__ ) > 1 else 0 else: __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep __lowerCamelCase = self._index_counter[timestep_int] return indices[pos].item() @property def __A ( self : Union[str, Any] ) -> Any: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: __lowerCamelCase = self.index_for_timestep(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.sigmas[step_index] __lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5) return sample def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Any: __lowerCamelCase = num_inference_steps __lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowerCamelCase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase = (np.arange(0 , SCREAMING_SNAKE_CASE__ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE__ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowerCamelCase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase = (np.arange(SCREAMING_SNAKE_CASE__ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE__ ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) __lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowerCamelCase = np.log(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = np.interp(SCREAMING_SNAKE_CASE__ , np.arange(0 , len(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ ) if self.config.use_karras_sigmas: __lowerCamelCase = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE__ , num_inference_steps=self.num_inference_steps ) __lowerCamelCase = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for sigma in sigmas] ) __lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowerCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __lowerCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): # mps does not support float64 __lowerCamelCase = timesteps.to(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) else: __lowerCamelCase = timesteps.to(device=SCREAMING_SNAKE_CASE__ ) # empty dt and derivative __lowerCamelCase = None __lowerCamelCase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowerCamelCase = defaultdict(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: # get log sigma __lowerCamelCase = np.log(SCREAMING_SNAKE_CASE__ ) # get distribution __lowerCamelCase = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __lowerCamelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __lowerCamelCase = low_idx + 1 __lowerCamelCase = log_sigmas[low_idx] __lowerCamelCase = log_sigmas[high_idx] # interpolate sigmas __lowerCamelCase = (low - log_sigma) / (low - high) __lowerCamelCase = np.clip(SCREAMING_SNAKE_CASE__ , 0 , 1 ) # transform interpolation to time range __lowerCamelCase = (1 - w) * low_idx + w * high_idx __lowerCamelCase = t.reshape(sigma.shape ) return t def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> torch.FloatTensor: __lowerCamelCase = in_sigmas[-1].item() __lowerCamelCase = in_sigmas[0].item() __lowerCamelCase = 7.0 # 7.0 is the value used in the paper __lowerCamelCase = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = sigma_min ** (1 / rho) __lowerCamelCase = sigma_max ** (1 / rho) __lowerCamelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __A ( self : str ) -> Tuple: return self.dt is None def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE__ : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Union[SchedulerOutput, Tuple]: __lowerCamelCase = self.index_for_timestep(SCREAMING_SNAKE_CASE__ ) # advance index counter by 1 __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowerCamelCase = self.sigmas[step_index] __lowerCamelCase = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __lowerCamelCase = self.sigmas[step_index - 1] __lowerCamelCase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowerCamelCase = 0 __lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __lowerCamelCase = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: __lowerCamelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowerCamelCase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowerCamelCase = sigma_next - sigma_hat # store for 2nd order step __lowerCamelCase = derivative __lowerCamelCase = dt __lowerCamelCase = sample else: # 2. 2nd order / Heun's method __lowerCamelCase = (sample - pred_original_sample) / sigma_next __lowerCamelCase = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __lowerCamelCase = self.dt __lowerCamelCase = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE__ ): # mps does not support float64 __lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __lowerCamelCase = self.timesteps.to(original_samples.device ) __lowerCamelCase = timesteps.to(original_samples.device ) __lowerCamelCase = [self.index_for_timestep(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for t in timesteps] __lowerCamelCase = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowerCamelCase = sigma.unsqueeze(-1 ) __lowerCamelCase = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any] ) -> Tuple: return self.config.num_train_timesteps
339
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
1
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : int=5 ) -> Tuple: # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('''<mask>''' ) == 1 __lowerCamelCase = torch.tensor(tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) ).unsqueeze(0 ) # Batch size 1 __lowerCamelCase = model(__lowerCAmelCase )[0] # The last hidden-state is the first element of the output tuple __lowerCamelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __lowerCamelCase = logits[0, masked_index, :] __lowerCamelCase = logits.softmax(dim=0 ) __lowerCamelCase , __lowerCamelCase = prob.topk(k=__lowerCAmelCase , dim=0 ) __lowerCamelCase = ''' '''.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__lowerCAmelCase ) )] ) __lowerCamelCase = tokenizer.mask_token __lowerCamelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ): __lowerCamelCase = predicted_token_bpe.replace('''\u2581''' , ''' ''' ) if " {0}".format(__lowerCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(__lowerCAmelCase ) , __lowerCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(__lowerCAmelCase , __lowerCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs SCREAMING_SNAKE_CASE__ : Tuple = CamembertTokenizer.from_pretrained("camembert-base") SCREAMING_SNAKE_CASE__ : Union[str, Any] = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() SCREAMING_SNAKE_CASE__ : Any = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
339
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
339
1
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE__ : Dict = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __lowerCAmelCase : Optional[int] ) -> List[Any]: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> Optional[Any]: if args.student_type == "roberta": __lowerCamelCase = False elif args.student_type == "gpt2": __lowerCamelCase = False def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> List[Any]: if args.student_type == "roberta": __lowerCamelCase = False def __magic_name__ ( ) -> int: __lowerCamelCase = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=__lowerCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=__lowerCAmelCase , help='''Temperature for the softmax temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=__lowerCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=__lowerCAmelCase , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=__lowerCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=__lowerCAmelCase , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=__lowerCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=__lowerCAmelCase , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=__lowerCAmelCase , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowerCAmelCase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=__lowerCAmelCase , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__lowerCAmelCase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowerCAmelCase , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=__lowerCAmelCase , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=__lowerCAmelCase , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=__lowerCAmelCase , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=__lowerCAmelCase , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=__lowerCAmelCase , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=__lowerCAmelCase , default=4000 , help='''Checkpoint interval.''' ) __lowerCamelCase = parser.parse_args() sanity_checks(__lowerCAmelCase ) # ARGS # init_gpu_params(__lowerCAmelCase ) set_seed(__lowerCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 ) git_log(args.dump_path ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = MODEL_CLASSES[args.student_type] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __lowerCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __lowerCamelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __lowerCamelCase = tokenizer.all_special_tokens.index(__lowerCAmelCase ) __lowerCamelCase = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) __lowerCamelCase = special_tok_ids __lowerCamelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: __lowerCamelCase = pickle.load(__lowerCAmelCase ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , '''rb''' ) as fp: __lowerCamelCase = pickle.load(__lowerCAmelCase ) __lowerCamelCase = np.maximum(__lowerCAmelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __lowerCamelCase = 0.0 # do not predict special tokens __lowerCamelCase = torch.from_numpy(__lowerCAmelCase ) else: __lowerCamelCase = None __lowerCamelCase = LmSeqsDataset(params=__lowerCAmelCase , data=__lowerCAmelCase ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) __lowerCamelCase = student_config_class.from_pretrained(args.student_config ) __lowerCamelCase = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __lowerCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCAmelCase ) else: __lowerCamelCase = student_model_class(__lowerCAmelCase ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __lowerCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCAmelCase ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__lowerCAmelCase , __lowerCAmelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__lowerCAmelCase , __lowerCAmelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __lowerCamelCase = Distiller( params=__lowerCAmelCase , dataset=__lowerCAmelCase , token_probs=__lowerCAmelCase , student=__lowerCAmelCase , teacher=__lowerCAmelCase ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
339
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
1
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
1
from __future__ import annotations from collections.abc import Callable SCREAMING_SNAKE_CASE__ : int = list[list[float | int]] def __magic_name__ ( __lowerCAmelCase : Matrix , __lowerCAmelCase : Matrix ) -> Matrix: __lowerCamelCase = len(__lowerCAmelCase ) __lowerCamelCase = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )] __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 for row in range(__lowerCAmelCase ): for col in range(__lowerCAmelCase ): __lowerCamelCase = matrix[row][col] __lowerCamelCase = vector[row][0] __lowerCamelCase = 0 __lowerCamelCase = 0 while row < size and col < size: # pivoting __lowerCamelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __lowerCamelCase , __lowerCamelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __lowerCAmelCase ): __lowerCamelCase = augmented[rowa][col] / augmented[row][col] __lowerCamelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __lowerCAmelCase ): for row in range(__lowerCAmelCase ): __lowerCamelCase = augmented[row][col] / augmented[col][col] for cola in range(__lowerCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase ) ] def __magic_name__ ( __lowerCAmelCase : list[int] ) -> Callable[[int], int]: __lowerCamelCase = len(__lowerCAmelCase ) __lowerCamelCase = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )] __lowerCamelCase = [[0] for _ in range(__lowerCAmelCase )] __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 for x_val, y_val in enumerate(__lowerCAmelCase ): for col in range(__lowerCAmelCase ): __lowerCamelCase = (x_val + 1) ** (size - col - 1) __lowerCamelCase = y_val __lowerCamelCase = solve(__lowerCAmelCase , __lowerCAmelCase ) def interpolated_func(__lowerCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__lowerCAmelCase ) ) return interpolated_func def __magic_name__ ( __lowerCAmelCase : int ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __magic_name__ ( __lowerCAmelCase : Callable[[int], int] = question_function , __lowerCAmelCase : int = 10 ) -> int: __lowerCamelCase = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )] __lowerCamelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __lowerCamelCase = 0 __lowerCamelCase = 42 __lowerCamelCase = 42 for poly in polynomials: __lowerCamelCase = 1 while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ): x_val += 1 ret += poly(__lowerCAmelCase ) return ret if __name__ == "__main__": print(F'{solution() = }')
339
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
1
import sys from collections import defaultdict class lowerCAmelCase__ : def __init__( self : List[str] ) -> List[str]: __lowerCamelCase = [] def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> Dict: return self.node_position[vertex] def __A ( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: __lowerCamelCase = pos def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase = 2 * start + 1 else: __lowerCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase = temp, tempa __lowerCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE__ ) self.top_to_bottom(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: __lowerCamelCase = position[index] while index != 0: __lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase = heap[parent] __lowerCamelCase = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) break __lowerCamelCase = parent else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(SCREAMING_SNAKE_CASE__ , 0 ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE__ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: __lowerCamelCase = positions[0] __lowerCamelCase = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE__ , 0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) return temp def __magic_name__ ( __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = Heap() __lowerCamelCase = [0] * len(__lowerCAmelCase ) __lowerCamelCase = [-1] * len(__lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase = [] for vertex in range(len(__lowerCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__lowerCAmelCase ) heap.node_position.append(__lowerCAmelCase ) __lowerCamelCase = [] __lowerCamelCase = 1 __lowerCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase = 0 __lowerCamelCase = distance heap.heapify(__lowerCAmelCase , __lowerCAmelCase ) for _ in range(1 , len(__lowerCAmelCase ) ): __lowerCamelCase = heap.delete_minimum(__lowerCAmelCase , __lowerCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__lowerCAmelCase )] ): __lowerCamelCase = distance heap.bottom_to_top( __lowerCAmelCase , heap.get_position(__lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > SCREAMING_SNAKE_CASE__ : List[str] = int(input("Enter number of edges: ").strip()) SCREAMING_SNAKE_CASE__ : List[str] = defaultdict(list) for _ in range(edges_number): SCREAMING_SNAKE_CASE__ : Dict = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
339
from functools import lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> set: __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__lowerCAmelCase ) if n > 1: factors.add(__lowerCAmelCase ) return factors @lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> int: return len(unique_prime_factors(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : list ) -> bool: return len(set(__lowerCAmelCase ) ) in (0, 1) def __magic_name__ ( __lowerCAmelCase : int ) -> list: __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(__lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(__lowerCAmelCase ) for x in group] checker.append(__lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(__lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def __magic_name__ ( __lowerCAmelCase : int = 4 ) -> int: __lowerCamelCase = run(__lowerCAmelCase ) return results[0] if len(__lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
339
1
def __magic_name__ ( __lowerCAmelCase : int ) -> int: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = f'''Input value of [number={number}] must be an integer''' raise TypeError(__lowerCAmelCase ) if number < 1: __lowerCamelCase = f'''Input value of [number={number}] must be > 0''' raise ValueError(__lowerCAmelCase ) __lowerCamelCase = 1 for i in range(1 , __lowerCAmelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
339
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
339
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
339
1
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[Any] = """rwkv""" a__ : Optional[int] = {"""max_position_embeddings""": """context_length"""} def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=5_02_77 , SCREAMING_SNAKE_CASE__ : Optional[Any]=10_24 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=40_96 , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=6 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: __lowerCamelCase = vocab_size __lowerCamelCase = context_length __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = rescale_every __lowerCamelCase = use_cache __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
339
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar("KEY") SCREAMING_SNAKE_CASE__ : Dict = TypeVar("VAL") @dataclass(frozen=__lowercase , slots=__lowercase ) class lowerCAmelCase__ ( Generic[KEY, VAL] ): a__ : KEY a__ : VAL class lowerCAmelCase__ ( _Item ): def __init__( self : str ) -> None: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __bool__( self : Tuple ) -> bool: return False SCREAMING_SNAKE_CASE__ : List[Any] = _DeletedItem() class lowerCAmelCase__ ( MutableMapping[KEY, VAL] ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ) -> None: __lowerCamelCase = initial_block_size __lowerCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowerCamelCase = capacity_factor __lowerCamelCase = 0 def __A ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ) -> int: return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int: return (ind + 1) % len(self._buckets ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> bool: __lowerCamelCase = self._buckets[ind] if not stored: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self._len += 1 return True elif stored.key == key: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return True else: return False def __A ( self : Any ) -> bool: __lowerCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __lowerCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCamelCase = self._buckets __lowerCamelCase = [None] * new_size __lowerCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __A ( self : str ) -> None: self._resize(len(self._buckets ) * 2 ) def __A ( self : Dict ) -> None: self._resize(len(self._buckets ) // 2 ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY ) -> Iterator[int]: __lowerCamelCase = self._get_bucket_index(SCREAMING_SNAKE_CASE__ ) for _ in range(len(self._buckets ) ): yield ind __lowerCamelCase = self._get_next_ind(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): break def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __delitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE__ ) if item is _deleted: continue if item.key == key: __lowerCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> VAL: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE__ ) def __len__( self : int ) -> int: return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ) -> str: __lowerCamelCase = ''' ,'''.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
339
1
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
339
from datetime import datetime as dt import os from github import Github SCREAMING_SNAKE_CASE__ : Any = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def __magic_name__ ( ) -> Any: __lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCamelCase = g.get_repo('''huggingface/transformers''' ) __lowerCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase ) __lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
339
1
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase ): a__ : Optional[Any] = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[Any]: super().__init__() __lowerCamelCase = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) __lowerCamelCase = prefix_inner_dim __lowerCamelCase = prefix_hidden_dim __lowerCamelCase = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase = ( nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase = GPTaConfig( vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]: __lowerCamelCase = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.encode_prefix(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.decode_prefix(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 ) __lowerCamelCase = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor: return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: return self.encode_prefix(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: __lowerCamelCase = torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 ) __lowerCamelCase = [] __lowerCamelCase = [] for feature in features: __lowerCamelCase = self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature # Only support beam search for now __lowerCamelCase , __lowerCamelCase = self.generate_beam( input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __lowerCamelCase = torch.stack(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.stack(SCREAMING_SNAKE_CASE__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __A ( self : str , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : int = 67 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Union[str, Any]: __lowerCamelCase = eos_token_id __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = torch.ones(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.int ) __lowerCamelCase = torch.zeros(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.bool ) if input_embeds is not None: __lowerCamelCase = input_embeds else: __lowerCamelCase = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = outputs.logits __lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __lowerCamelCase = logits.softmax(-1 ).log() if scores is None: __lowerCamelCase , __lowerCamelCase = logits.topk(SCREAMING_SNAKE_CASE__ , -1 ) __lowerCamelCase = generated.expand(SCREAMING_SNAKE_CASE__ , *generated.shape[1:] ) __lowerCamelCase , __lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __lowerCamelCase = next_tokens else: __lowerCamelCase = tokens.expand(SCREAMING_SNAKE_CASE__ , *tokens.shape[1:] ) __lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 ) else: __lowerCamelCase = -float(np.inf ) __lowerCamelCase = 0 __lowerCamelCase = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __lowerCamelCase = scores_sum / seq_lengths[:, None] __lowerCamelCase , __lowerCamelCase = scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE__ , -1 ) __lowerCamelCase = next_tokens // scores_sum.shape[1] __lowerCamelCase = seq_lengths[next_tokens_source] __lowerCamelCase = next_tokens % scores_sum.shape[1] __lowerCamelCase = next_tokens.unsqueeze(1 ) __lowerCamelCase = tokens[next_tokens_source] __lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 ) __lowerCamelCase = generated[next_tokens_source] __lowerCamelCase = scores_sum_average * seq_lengths __lowerCamelCase = is_stopped[next_tokens_source] __lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 ) __lowerCamelCase = is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE__ ).squeeze() if is_stopped.all(): break __lowerCamelCase = scores / seq_lengths __lowerCamelCase = scores.argsort(descending=SCREAMING_SNAKE_CASE__ ) # tokens tensors are already padded to max_seq_length __lowerCamelCase = [tokens[i] for i in order] __lowerCamelCase = torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 ) __lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["bert-base-uncased", "bert-base-cased"] SCREAMING_SNAKE_CASE__ : Optional[int] = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class lowerCAmelCase__ ( tf.keras.Model ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: super().__init__() __lowerCamelCase = tokenizer __lowerCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = TFAutoModel.from_config(SCREAMING_SNAKE_CASE__ ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.bert(**SCREAMING_SNAKE_CASE__ ) return out["pooler_output"] @require_tf @require_tensorflow_text class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Optional[int] ) -> Tuple: super().setUp() __lowerCamelCase = [ BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __lowerCamelCase = [TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast_bert_tokenizer=SCREAMING_SNAKE_CASE__ ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __lowerCamelCase = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] __lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __A ( self : Union[str, Any] ) -> Optional[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''tf''' , padding='''longest''' ) __lowerCamelCase = tf_tokenizer(SCREAMING_SNAKE_CASE__ ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def __A ( self : List[Any] ) -> str: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase = tf_tokenizer(self.paired_sentences ) __lowerCamelCase = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def __A ( self : str ) -> Optional[Any]: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase = tf.function(SCREAMING_SNAKE_CASE__ ) for test_inputs in (self.test_sentences, self.paired_sentences): __lowerCamelCase = tf.constant(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = compiled_tokenizer(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tf_tokenizer(SCREAMING_SNAKE_CASE__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __A ( self : str ) -> int: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tf.convert_to_tensor(self.test_sentences ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __lowerCamelCase = Path(SCREAMING_SNAKE_CASE__ ) / '''saved.model''' model.save(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tf.keras.models.load_model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = loaded_model(SCREAMING_SNAKE_CASE__ ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
339
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) # TODO Update this SCREAMING_SNAKE_CASE__ : Any = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class lowerCAmelCase__ ( __lowercase ): a__ : Tuple = """esm""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : List[str]=12 , SCREAMING_SNAKE_CASE__ : List[Any]=30_72 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=10_26 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : List[Any]="absolute" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , mask_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = emb_layer_norm_before __lowerCamelCase = token_dropout __lowerCamelCase = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) __lowerCamelCase = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = EsmFoldConfig(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) __lowerCamelCase = get_default_vocab_list() else: __lowerCamelCase = vocab_list else: __lowerCamelCase = None __lowerCamelCase = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , SCREAMING_SNAKE_CASE__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def __A ( self : int ) -> List[Any]: __lowerCamelCase = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self.esmfold_config.to_dict() return output @dataclass class lowerCAmelCase__ : a__ : str = None a__ : bool = True a__ : bool = False a__ : bool = False a__ : bool = False a__ : float = 0 a__ : bool = True a__ : bool = False a__ : int = 128 a__ : "TrunkConfig" = None def __A ( self : Any ) -> List[Any]: if self.trunk is None: __lowerCamelCase = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = TrunkConfig(**self.trunk ) def __A ( self : str ) -> Any: __lowerCamelCase = asdict(self ) __lowerCamelCase = self.trunk.to_dict() return output @dataclass class lowerCAmelCase__ : a__ : int = 48 a__ : int = 1_024 a__ : int = 128 a__ : int = 32 a__ : int = 32 a__ : int = 32 a__ : float = 0 a__ : float = 0 a__ : bool = False a__ : int = 4 a__ : Optional[int] = 128 a__ : "StructureModuleConfig" = None def __A ( self : Tuple ) -> List[str]: if self.structure_module is None: __lowerCamelCase = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) __lowerCamelCase = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def __A ( self : Dict ) -> Optional[Any]: __lowerCamelCase = asdict(self ) __lowerCamelCase = self.structure_module.to_dict() return output @dataclass class lowerCAmelCase__ : a__ : int = 384 a__ : int = 128 a__ : int = 16 a__ : int = 128 a__ : int = 12 a__ : int = 4 a__ : int = 8 a__ : float = 0.1 a__ : int = 8 a__ : int = 1 a__ : int = 2 a__ : int = 7 a__ : int = 10 a__ : float = 1e-8 a__ : float = 1e5 def __A ( self : Optional[Any] ) -> str: return asdict(self ) def __magic_name__ ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None: if start is None: __lowerCamelCase = 0 if end is None: __lowerCamelCase = len(__lowerCAmelCase ) - 1 if start >= end: return __lowerCamelCase = (start + end) // 2 slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase ) if sequence[end] < sequence[mid]: __lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end] slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
339
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=18 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : Dict=4_00 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.48145466, 0.4578275, 0.40821073] , SCREAMING_SNAKE_CASE__ : Any=[0.26862954, 0.26130258, 0.27577711] , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Any: __lowerCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std __lowerCamelCase = do_convert_rgb def __A ( self : int ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> List[Any]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowerCamelCase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __lowerCamelCase = [] for i in range(self.batch_size ): __lowerCamelCase , __lowerCamelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] if torchify: __lowerCamelCase = [torch.from_numpy(SCREAMING_SNAKE_CASE__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : int = ChineseCLIPImageProcessor if is_vision_available() else None def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE__ ) @property def __A ( self : str ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Optional[Any] ) -> Any: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) ) def __A ( self : Dict ) -> Any: __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 2_24, '''width''': 2_24} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __A ( self : Union[str, Any] ) -> int: pass def __A ( self : Tuple ) -> Optional[int]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self : List[str] ) -> int: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self : Optional[Any] ) -> Optional[Any]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def __A ( self : Tuple ) -> Union[str, Any]: __lowerCamelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 3 @property def __A ( self : Dict ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Any ) -> Tuple: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) ) def __A ( self : Any ) -> List[str]: pass def __A ( self : Optional[Any] ) -> Union[str, Any]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
339
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : str=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : List[str]=True , ) -> Any: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad def __A ( self : int ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=False ) -> Tuple: if not batched: __lowerCamelCase = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ): __lowerCamelCase , __lowerCamelCase = image.size else: __lowerCamelCase , __lowerCamelCase = image.shape[1], image.shape[2] if w < h: __lowerCamelCase = int(self.size['''shortest_edge'''] * h / w ) __lowerCamelCase = self.size['''shortest_edge'''] elif w > h: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = self.size['''shortest_edge'''] else: __lowerCamelCase = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCamelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0] __lowerCamelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : int = DetaImageProcessor if is_vision_available() else None def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = DetaImageProcessingTester(self ) @property def __A ( self : int ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Any ) -> Dict: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_rescale''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_pad''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) def __A ( self : Optional[Any] ) -> Optional[int]: __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ ) def __A ( self : str ) -> Any: pass def __A ( self : List[str] ) -> Dict: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self : List[Any] ) -> Tuple: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self : Any ) -> int: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values __lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __A ( self : Union[str, Any] ) -> int: # prepare image and target __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target} # encode them __lowerCamelCase = DetaImageProcessor() __lowerCamelCase = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) # verify area __lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE__ ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE__ ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE__ ) ) # verify class_labels __lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE__ ) ) # verify orig_size __lowerCamelCase = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE__ ) ) # verify size __lowerCamelCase = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE__ ) ) @slow def __A ( self : Dict ) -> Optional[int]: # prepare image, target and masks_path __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} __lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __lowerCamelCase = DetaImageProcessor(format='''coco_panoptic''' ) __lowerCamelCase = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) # verify area __lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE__ ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE__ ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE__ ) ) # verify class_labels __lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE__ ) ) # verify masks __lowerCamelCase = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , SCREAMING_SNAKE_CASE__ ) # verify orig_size __lowerCamelCase = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE__ ) ) # verify size __lowerCamelCase = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE__ ) )
339
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
339
1
import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __magic_name__ ( __lowerCAmelCase : Union[str, Any] ) -> Tuple: __lowerCamelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Any: __lowerCamelCase = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: __lowerCamelCase = s_dict.pop(__lowerCAmelCase ) elif "subsample" in key: __lowerCamelCase = s_dict.pop(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Any: __lowerCamelCase , __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) __lowerCamelCase = emb.weight.data return lin_layer def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ) -> Optional[Any]: __lowerCamelCase = torch.load(__lowerCAmelCase , map_location='''cpu''' ) __lowerCamelCase = mam_aaa['''args'''] __lowerCamelCase = mam_aaa['''model'''] __lowerCamelCase = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(__lowerCAmelCase ) rename_keys(__lowerCAmelCase ) __lowerCamelCase = state_dict['''decoder.embed_tokens.weight'''].shape[0] __lowerCamelCase = args.share_decoder_input_output_embed __lowerCamelCase = [int(__lowerCAmelCase ) for i in args.conv_kernel_sizes.split(''',''' )] __lowerCamelCase = SpeechaTextConfig( vocab_size=__lowerCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__lowerCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__lowerCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__lowerCAmelCase , num_beams=5 , max_length=200 , use_cache=__lowerCAmelCase , decoder_start_token_id=2 , early_stopping=__lowerCAmelCase , ) __lowerCamelCase = SpeechaTextForConditionalGeneration(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f''' but all the following weights are missing {missing}''' ) if tie_embeds: __lowerCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __lowerCamelCase = lm_head_weights model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
1
from collections.abc import Generator from math import sin def __magic_name__ ( __lowerCAmelCase : bytes ) -> bytes: if len(__lowerCAmelCase ) != 32: raise ValueError('''Input must be of length 32''' ) __lowerCamelCase = B'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __magic_name__ ( __lowerCAmelCase : int ) -> bytes: if i < 0: raise ValueError('''Input must be non-negative''' ) __lowerCamelCase = format(__lowerCAmelCase , '''08x''' )[-8:] __lowerCamelCase = B'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __magic_name__ ( __lowerCAmelCase : bytes ) -> bytes: __lowerCamelCase = B'''''' for char in message: bit_string += format(__lowerCAmelCase , '''08b''' ).encode('''utf-8''' ) __lowerCamelCase = format(len(__lowerCAmelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__lowerCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __magic_name__ ( __lowerCAmelCase : bytes ) -> Generator[list[int], None, None]: if len(__lowerCAmelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(__lowerCAmelCase ) , 512 ): __lowerCamelCase = bit_string[pos : pos + 512] __lowerCamelCase = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __magic_name__ ( __lowerCAmelCase : int ) -> int: if i < 0: raise ValueError('''Input must be non-negative''' ) __lowerCamelCase = format(__lowerCAmelCase , '''032b''' ) __lowerCamelCase = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(__lowerCAmelCase , 2 ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return (a + b) % 2**32 def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __magic_name__ ( __lowerCAmelCase : bytes ) -> bytes: __lowerCamelCase = preprocess(__lowerCAmelCase ) __lowerCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase = 0X6745_2301 __lowerCamelCase = 0Xefcd_ab89 __lowerCamelCase = 0X98ba_dcfe __lowerCamelCase = 0X1032_5476 __lowerCamelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__lowerCAmelCase ): __lowerCamelCase = aa __lowerCamelCase = ba __lowerCamelCase = ca __lowerCamelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase = d ^ (b & (c ^ d)) __lowerCamelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase = c ^ (d & (b ^ c)) __lowerCamelCase = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase = b ^ c ^ d __lowerCamelCase = (3 * i + 5) % 16 else: __lowerCamelCase = c ^ (b | not_aa(__lowerCAmelCase )) __lowerCamelCase = (7 * i) % 16 __lowerCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase = d __lowerCamelCase = c __lowerCamelCase = b __lowerCamelCase = sum_aa(__lowerCAmelCase , left_rotate_aa(__lowerCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase = sum_aa(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = sum_aa(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = sum_aa(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = sum_aa(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
339
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
339
1
import math import os import sys def __magic_name__ ( __lowerCAmelCase : str ) -> str: __lowerCamelCase = '''''' try: with open(__lowerCAmelCase , '''rb''' ) as binary_file: __lowerCamelCase = binary_file.read() for dat in data: __lowerCamelCase = f'''{dat:08b}''' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def __magic_name__ ( __lowerCAmelCase : dict[str, str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : str ) -> None: lexicon.pop(__lowerCAmelCase ) __lowerCamelCase = last_match_id if math.loga(__lowerCAmelCase ).is_integer(): for curr_key in lexicon: __lowerCamelCase = '''0''' + lexicon[curr_key] __lowerCamelCase = bin(__lowerCAmelCase )[2:] def __magic_name__ ( __lowerCAmelCase : str ) -> str: __lowerCamelCase = {'''0''': '''0''', '''1''': '''1'''} __lowerCamelCase , __lowerCamelCase = '''''', '''''' __lowerCamelCase = len(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowerCamelCase = lexicon[curr_string] result += last_match_id add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) index += 1 __lowerCamelCase = '''''' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __lowerCamelCase = lexicon[curr_string] result += last_match_id return result def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> str: __lowerCamelCase = os.path.getsize(__lowerCAmelCase ) __lowerCamelCase = bin(__lowerCAmelCase )[2:] __lowerCamelCase = len(__lowerCAmelCase ) return "0" * (length_length - 1) + file_length_binary + compressed def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> None: __lowerCamelCase = 8 try: with open(__lowerCAmelCase , '''wb''' ) as opened_file: __lowerCamelCase = [ to_write[i : i + byte_length] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> None: __lowerCamelCase = read_file_binary(__lowerCAmelCase ) __lowerCamelCase = compress_data(__lowerCAmelCase ) __lowerCamelCase = add_file_length(__lowerCAmelCase , __lowerCAmelCase ) write_file_binary(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool: return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
from math import asin, atan, cos, radians, sin, sqrt, tan SCREAMING_SNAKE_CASE__ : Optional[Any] = 6_3_7_8_1_3_7.0 SCREAMING_SNAKE_CASE__ : Tuple = 6_3_5_6_7_5_2.3_1_4_2_4_5 SCREAMING_SNAKE_CASE__ : Dict = 6_378_137 def __magic_name__ ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float: __lowerCamelCase = (AXIS_A - AXIS_B) / AXIS_A __lowerCamelCase = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) __lowerCamelCase = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) __lowerCamelCase = radians(__lowerCAmelCase ) __lowerCamelCase = radians(__lowerCAmelCase ) # Equation __lowerCamelCase = sin((phi_a - phi_a) / 2 ) __lowerCamelCase = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda __lowerCamelCase = sqrt(sin_sq_phi + (cos(__lowerCAmelCase ) * cos(__lowerCAmelCase ) * sin_sq_lambda) ) return 2 * RADIUS * asin(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
1
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : bool = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable: raise ValueError( '''Warning: upper bound of deterministic test is exceeded. ''' '''Pass allow_probable=True to allow probabilistic test. ''' '''A return value of True indicates a probable prime.''' ) # array bounds provided by analysis __lowerCamelCase = [ 2047, 137_3653, 2532_6001, 32_1503_1751, 2_1523_0289_8747, 3_4747_4966_0383, 341_5500_7172_8321, 1, 382_5123_0565_4641_3051, 1, 1, 3186_6585_7834_0311_5116_7461, 3_3170_4406_4679_8873_8596_1981, ] __lowerCamelCase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(_UpperCAmelCase , 1 ): if n < _p: # then we have our last prime to check __lowerCamelCase = primes[:idx] break __lowerCamelCase = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: __lowerCamelCase = False for r in range(_UpperCAmelCase ): __lowerCamelCase = pow(_UpperCAmelCase , d * 2**r , _UpperCAmelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): __lowerCamelCase = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def __magic_name__ ( ) -> None: assert not miller_rabin(561 ) assert miller_rabin(563 ) # 2047 assert not miller_rabin(83_8201 ) assert miller_rabin(83_8207 ) # 1_373_653 assert not miller_rabin(1731_6001 ) assert miller_rabin(1731_6017 ) # 25_326_001 assert not miller_rabin(30_7838_6641 ) assert miller_rabin(30_7838_6653 ) # 3_215_031_751 assert not miller_rabin(1_7130_4557_4801 ) assert miller_rabin(1_7130_4557_4819 ) # 2_152_302_898_747 assert not miller_rabin(2_7797_9972_8307 ) assert miller_rabin(2_7797_9972_8327 ) # 3_474_749_660_383 assert not miller_rabin(113_8500_2390_9441 ) assert miller_rabin(113_8500_2390_9527 ) # 341_550_071_728_321 assert not miller_rabin(127_5041_0188_4880_4351 ) assert miller_rabin(127_5041_0188_4880_4391 ) # 3_825_123_056_546_413_051 assert not miller_rabin(796_6646_4458_5077_8779_1867 ) assert miller_rabin(796_6646_4458_5077_8779_1951 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(5528_4067_7446_6478_9766_0333 ) assert miller_rabin(5528_4067_7446_6478_9766_0359 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
350
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. __lowerCamelCase , __lowerCamelCase = y, x % y return abs(__lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: try: __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __lowerCamelCase = int(nums[0] ) __lowerCamelCase = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
339
0
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1_000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } SCREAMING_SNAKE_CASE__ : List[Any] = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1_000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } SCREAMING_SNAKE_CASE__ : Optional[int] = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } SCREAMING_SNAKE_CASE__ : Dict = { "num_train_timesteps": 40, "sigma_min": 0.0_0_2, "sigma_max": 8_0.0, } SCREAMING_SNAKE_CASE__ : str = { "num_train_timesteps": 201, "sigma_min": 0.0_0_2, "sigma_max": 8_0.0, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "num_train_timesteps": 151, "sigma_min": 0.0_0_2, "sigma_max": 8_0.0, } def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Tuple: if isinstance(lowercase__ , lowercase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=False ) -> str: __lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: __lowerCamelCase = checkpoint[f'''{old_prefix}.skip_connection.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=None ) -> List[Any]: __lowerCamelCase = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) __lowerCamelCase = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) __lowerCamelCase = checkpoint[f'''{old_prefix}.norm.weight'''] __lowerCamelCase = checkpoint[f'''{old_prefix}.norm.bias'''] __lowerCamelCase = weight_q.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = bias_q.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = weight_k.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = bias_k.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = weight_v.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = bias_v.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) __lowerCamelCase = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ) -> int: __lowerCamelCase = torch.load(lowercase__ , map_location='''cpu''' ) __lowerCamelCase = {} __lowerCamelCase = checkpoint["""time_embed.0.weight"""] __lowerCamelCase = checkpoint["""time_embed.0.bias"""] __lowerCamelCase = checkpoint["""time_embed.2.weight"""] __lowerCamelCase = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: __lowerCamelCase = checkpoint["""label_emb.weight"""] __lowerCamelCase = checkpoint["""input_blocks.0.0.weight"""] __lowerCamelCase = checkpoint["""input_blocks.0.0.bias"""] __lowerCamelCase = unet_config["""down_block_types"""] __lowerCamelCase = unet_config["""layers_per_block"""] __lowerCamelCase = unet_config["""attention_head_dim"""] __lowerCamelCase = unet_config["""block_out_channels"""] __lowerCamelCase = 1 __lowerCamelCase = channels_list[0] for i, layer_type in enumerate(lowercase__ ): __lowerCamelCase = channels_list[i] __lowerCamelCase = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowercase__ ): __lowerCamelCase = f'''down_blocks.{i}.resnets.{j}''' __lowerCamelCase = f'''input_blocks.{current_layer}.0''' __lowerCamelCase = True if j == 0 and downsample_block_has_skip else False __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowercase__ ): __lowerCamelCase = f'''down_blocks.{i}.resnets.{j}''' __lowerCamelCase = f'''input_blocks.{current_layer}.0''' __lowerCamelCase = True if j == 0 and downsample_block_has_skip else False __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) __lowerCamelCase = f'''down_blocks.{i}.attentions.{j}''' __lowerCamelCase = f'''input_blocks.{current_layer}.1''' __lowerCamelCase = convert_attention( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowerCamelCase = f'''down_blocks.{i}.downsamplers.0''' __lowerCamelCase = f'''input_blocks.{current_layer}.0''' __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 __lowerCamelCase = current_channels # hardcoded the mid-block for now __lowerCamelCase = """mid_block.resnets.0""" __lowerCamelCase = """middle_block.0""" __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowerCamelCase = """mid_block.attentions.0""" __lowerCamelCase = """middle_block.1""" __lowerCamelCase = convert_attention(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowerCamelCase = """mid_block.resnets.1""" __lowerCamelCase = """middle_block.2""" __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowerCamelCase = 0 __lowerCamelCase = unet_config["""up_block_types"""] for i, layer_type in enumerate(lowercase__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __lowerCamelCase = f'''up_blocks.{i}.resnets.{j}''' __lowerCamelCase = f'''output_blocks.{current_layer}.0''' __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowerCamelCase = f'''up_blocks.{i}.upsamplers.0''' __lowerCamelCase = f'''output_blocks.{current_layer-1}.1''' __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __lowerCamelCase = f'''up_blocks.{i}.resnets.{j}''' __lowerCamelCase = f'''output_blocks.{current_layer}.0''' __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) __lowerCamelCase = f'''up_blocks.{i}.attentions.{j}''' __lowerCamelCase = f'''output_blocks.{current_layer}.1''' __lowerCamelCase = convert_attention( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowerCamelCase = f'''up_blocks.{i}.upsamplers.0''' __lowerCamelCase = f'''output_blocks.{current_layer-1}.2''' __lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowerCamelCase = checkpoint["""out.0.weight"""] __lowerCamelCase = checkpoint["""out.0.bias"""] __lowerCamelCase = checkpoint["""out.2.weight"""] __lowerCamelCase = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." ) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") SCREAMING_SNAKE_CASE__ : str = parser.parse_args() SCREAMING_SNAKE_CASE__ : Union[str, Any] = strabool(args.class_cond) SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.basename(args.unet_path) print(F'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: SCREAMING_SNAKE_CASE__ : Any = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): SCREAMING_SNAKE_CASE__ : Union[str, Any] = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: SCREAMING_SNAKE_CASE__ : str = TEST_UNET_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : str = con_pt_to_diffuser(args.unet_path, unet_config) SCREAMING_SNAKE_CASE__ : int = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: SCREAMING_SNAKE_CASE__ : Dict = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: SCREAMING_SNAKE_CASE__ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): SCREAMING_SNAKE_CASE__ : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') SCREAMING_SNAKE_CASE__ : Tuple = CMStochasticIterativeScheduler(**scheduler_config) SCREAMING_SNAKE_CASE__ : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
351
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
0
def __magic_name__ ( ) -> int: for n in range(1 , 100_0000 ): yield n * (n + 1) // 2 def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Optional[Any]: __lowerCamelCase = 1 __lowerCamelCase = 2 while i * i <= n: __lowerCamelCase = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def __magic_name__ ( ) -> int: return next(i for i in triangle_number_generator() if count_divisors(__UpperCAmelCase ) > 500 ) if __name__ == "__main__": print(solution())
352
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
0
import os def __magic_name__ ( __lowerCAmelCase : str = "matrix.txt" ) -> int: with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as in_file: __lowerCamelCase = in_file.read() __lowerCamelCase = [[int(__lowerCAmelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()] __lowerCamelCase = [[0 for cell in row] for row in grid] __lowerCamelCase = len(grid[0] ) __lowerCamelCase = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )] __lowerCamelCase = grid[0][0] for i in range(1 , __lowerCAmelCase ): __lowerCamelCase = grid[0][i] + dp[0][i - 1] for i in range(1 , __lowerCAmelCase ): __lowerCamelCase = grid[i][0] + dp[i - 1][0] for i in range(1 , __lowerCAmelCase ): for j in range(1 , __lowerCAmelCase ): __lowerCamelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F'{solution() = }')
353
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
339
0
from collections.abc import Callable import numpy as np def __magic_name__ ( __lowerCAmelCase : Callable , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> np.array: __lowerCamelCase = int(np.ceil((x_end - xa) / step_size ) ) __lowerCamelCase = np.zeros((n + 1,) ) __lowerCamelCase = ya __lowerCamelCase = xa for k in range(lowerCamelCase__ ): __lowerCamelCase = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) __lowerCamelCase = y[k] + ( (step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
354
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
0
from __future__ import annotations SCREAMING_SNAKE_CASE__ : int = [True] * 1_000_001 SCREAMING_SNAKE_CASE__ : List[str] = 2 while i * i <= 1_000_000: if seive[i]: for j in range(i * i, 1_000_001, i): SCREAMING_SNAKE_CASE__ : List[Any] = False i += 1 def __magic_name__ ( __lowerCAmelCase : int ) -> Tuple: return seive[n] def __magic_name__ ( __lowerCAmelCase : int ) -> int: return any(digit in '''02468''' for digit in str(__a ) ) def __magic_name__ ( __lowerCAmelCase : int = 100_0000 ) -> Tuple: __lowerCamelCase = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(__a ) and not contains_an_even_digit(__a ): __lowerCamelCase = str(__a ) __lowerCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__a ) )] if all(is_prime(__a ) for i in list_nums ): result.append(__a ) return result def __magic_name__ ( ) -> Optional[int]: return len(find_circular_primes() ) if __name__ == "__main__": print(F'{len(find_circular_primes()) = }')
355
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
0
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( lowerCamelCase__ , unittest.TestCase ): a__ : int = KandinskyVaaPriorPipeline a__ : Tuple = ['prompt'] a__ : int = ['prompt', 'negative_prompt'] a__ : Optional[Any] = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] a__ : Optional[Any] = False @property def __A ( self : Dict ) -> str: return 32 @property def __A ( self : List[Any] ) -> List[str]: return 32 @property def __A ( self : Tuple ) -> Dict: return self.time_input_dim @property def __A ( self : int ) -> str: return self.time_input_dim * 4 @property def __A ( self : Tuple ) -> Union[str, Any]: return 1_00 @property def __A ( self : str ) -> Optional[int]: __lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __A ( self : int ) -> str: torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ ) @property def __A ( self : List[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __lowerCamelCase = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __A ( self : Union[str, Any] ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __lowerCamelCase = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : int ) -> List[Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor def __A ( self : Union[str, Any] ) -> Tuple: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=10.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=0 ) -> str: if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def __A ( self : str ) -> Optional[Any]: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.image_embeds __lowerCamelCase = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __lowerCamelCase = image[0, -10:] __lowerCamelCase = image_from_tuple[0, -10:] assert image.shape == (1, 32) __lowerCamelCase = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __A ( self : Any ) -> int: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True __lowerCamelCase = False self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , ) @skip_mps def __A ( self : str ) -> Dict: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = False self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
356
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
0
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __magic_name__ ( __lowerCAmelCase : BertModel , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> int: """simple docstring""" __lowerCamelCase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') __lowerCamelCase = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__A ): os.makedirs(__A ) __lowerCamelCase = model.state_dict() def to_tf_var_name(__lowerCAmelCase : str ): for patt, repl in iter(__A ): __lowerCamelCase = name.replace(__A , __A ) return f'''bert/{name}''' def create_tf_var(__lowerCAmelCase : np.ndarray , __lowerCAmelCase : str , __lowerCAmelCase : tf.Session ): __lowerCamelCase = tf.dtypes.as_dtype(tensor.dtype ) __lowerCamelCase = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __lowerCamelCase = to_tf_var_name(__A ) __lowerCamelCase = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): __lowerCamelCase = torch_tensor.T __lowerCamelCase = create_tf_var(tensor=__A , name=__A , session=__A ) tf.keras.backend.set_value(__A , __A ) __lowerCamelCase = session.run(__A ) print(f'''Successfully created {tf_name}: {np.allclose(__A , __A )}''' ) __lowerCamelCase = tf.train.Saver(tf.trainable_variables() ) saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def __magic_name__ ( __lowerCAmelCase : Any=None ) -> str: """simple docstring""" __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' ) __lowerCamelCase = parser.parse_args(__A ) __lowerCamelCase = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
357
from functools import lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> set: __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__lowerCAmelCase ) if n > 1: factors.add(__lowerCAmelCase ) return factors @lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> int: return len(unique_prime_factors(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : list ) -> bool: return len(set(__lowerCAmelCase ) ) in (0, 1) def __magic_name__ ( __lowerCAmelCase : int ) -> list: __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(__lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(__lowerCAmelCase ) for x in group] checker.append(__lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(__lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def __magic_name__ ( __lowerCAmelCase : int = 4 ) -> int: __lowerCamelCase = run(__lowerCAmelCase ) return results[0] if len(__lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
339
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class lowerCAmelCase__ ( lowercase__ ): a__ : List[Any] = 'canine' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=7_68 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : int=30_72 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=1_63_84 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1e-12 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : List[str]=0XE0_00 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0XE0_01 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : str=8 , SCREAMING_SNAKE_CASE__ : int=1_63_84 , SCREAMING_SNAKE_CASE__ : int=1_28 , **SCREAMING_SNAKE_CASE__ : str , ) -> str: super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = initializer_range __lowerCamelCase = type_vocab_size __lowerCamelCase = layer_norm_eps # Character config: __lowerCamelCase = downsampling_rate __lowerCamelCase = upsampling_kernel_size __lowerCamelCase = num_hash_functions __lowerCamelCase = num_hash_buckets __lowerCamelCase = local_transformer_stride
358
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""", """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""", """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""", """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""", """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""", """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""", """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""", """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""", """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""", } class lowerCAmelCase__ ( __A ): a__ : Tuple = 'xmod' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=3_05_22 , SCREAMING_SNAKE_CASE__ : Tuple=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=30_72 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : str=1e-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : int=("en_XX",) , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Any: super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(__lowercase ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __A ): @property def __A ( self : List[str] ) -> int: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
359
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
339
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json", } class lowerCAmelCase__ ( A_ ): a__ : Union[str, Any] = "blip_2_vision_model" def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any]=14_08 , SCREAMING_SNAKE_CASE__ : Optional[Any]=61_44 , SCREAMING_SNAKE_CASE__ : Any=39 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_24 , SCREAMING_SNAKE_CASE__ : str=14 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.00001 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-10 , SCREAMING_SNAKE_CASE__ : Dict=True , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Tuple: super().__init__(**snake_case__ ) __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = patch_size __lowerCamelCase = image_size __lowerCamelCase = initializer_range __lowerCamelCase = attention_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = hidden_act __lowerCamelCase = qkv_bias @classmethod def __A ( cls : str , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> "PretrainedConfig": cls._set_token_in_kwargs(snake_case__ ) __lowerCamelCase = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": __lowerCamelCase = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowerCAmelCase__ ( A_ ): a__ : Optional[int] = "blip_2_qformer" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : Any=7_68 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : List[str]=12 , SCREAMING_SNAKE_CASE__ : List[Any]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=14_08 , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> int: super().__init__(pad_token_id=snake_case__ , **snake_case__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = cross_attention_frequency __lowerCamelCase = encoder_hidden_size @classmethod def __A ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Any ) -> "PretrainedConfig": cls._set_token_in_kwargs(snake_case__ ) __lowerCamelCase = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": __lowerCamelCase = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowerCAmelCase__ ( A_ ): a__ : int = "blip-2" a__ : Optional[int] = True def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Any=32 , **SCREAMING_SNAKE_CASE__ : Dict ) -> Dict: super().__init__(**snake_case__ ) if vision_config is None: __lowerCamelCase = {} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' ) if qformer_config is None: __lowerCamelCase = {} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' ) if text_config is None: __lowerCamelCase = {} logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' ) __lowerCamelCase = BlipaVisionConfig(**snake_case__ ) __lowerCamelCase = BlipaQFormerConfig(**snake_case__ ) __lowerCamelCase = text_config["model_type"] if "model_type" in text_config else "opt" __lowerCamelCase = CONFIG_MAPPING[text_model_type](**snake_case__ ) __lowerCamelCase = self.text_config.tie_word_embeddings __lowerCamelCase = self.text_config.is_encoder_decoder __lowerCamelCase = num_query_tokens __lowerCamelCase = self.vision_config.hidden_size __lowerCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __lowerCamelCase = 1.0 __lowerCamelCase = 0.02 @classmethod def __A ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : BlipaVisionConfig , SCREAMING_SNAKE_CASE__ : BlipaQFormerConfig , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Dict: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case__ , ) def __A ( self : List[Any] ) -> Optional[Any]: __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.vision_config.to_dict() __lowerCamelCase = self.qformer_config.to_dict() __lowerCamelCase = self.text_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
360
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar("KEY") SCREAMING_SNAKE_CASE__ : Dict = TypeVar("VAL") @dataclass(frozen=__lowercase , slots=__lowercase ) class lowerCAmelCase__ ( Generic[KEY, VAL] ): a__ : KEY a__ : VAL class lowerCAmelCase__ ( _Item ): def __init__( self : str ) -> None: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __bool__( self : Tuple ) -> bool: return False SCREAMING_SNAKE_CASE__ : List[Any] = _DeletedItem() class lowerCAmelCase__ ( MutableMapping[KEY, VAL] ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ) -> None: __lowerCamelCase = initial_block_size __lowerCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowerCamelCase = capacity_factor __lowerCamelCase = 0 def __A ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ) -> int: return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int: return (ind + 1) % len(self._buckets ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> bool: __lowerCamelCase = self._buckets[ind] if not stored: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self._len += 1 return True elif stored.key == key: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return True else: return False def __A ( self : Any ) -> bool: __lowerCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __lowerCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCamelCase = self._buckets __lowerCamelCase = [None] * new_size __lowerCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __A ( self : str ) -> None: self._resize(len(self._buckets ) * 2 ) def __A ( self : Dict ) -> None: self._resize(len(self._buckets ) // 2 ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY ) -> Iterator[int]: __lowerCamelCase = self._get_bucket_index(SCREAMING_SNAKE_CASE__ ) for _ in range(len(self._buckets ) ): yield ind __lowerCamelCase = self._get_next_ind(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): break def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __delitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE__ ) if item is _deleted: continue if item.key == key: __lowerCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> VAL: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE__ ) def __len__( self : int ) -> int: return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ) -> str: __lowerCamelCase = ''' ,'''.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
339
0
import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str: # Initialise PyTorch model __lowerCamelCase = RemBertConfig.from_json_file(lowerCAmelCase__ ) print('''Building PyTorch model from configuration: {}'''.format(str(lowerCAmelCase__ ) ) ) __lowerCamelCase = RemBertModel(lowerCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model print('''Save PyTorch model to {}'''.format(lowerCAmelCase__ ) ) torch.save(model.state_dict() , lowerCAmelCase__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--rembert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained RemBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
361
from datetime import datetime as dt import os from github import Github SCREAMING_SNAKE_CASE__ : Any = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def __magic_name__ ( ) -> Any: __lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCamelCase = g.get_repo('''huggingface/transformers''' ) __lowerCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase ) __lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
339
0
import sys from pathlib import Path SCREAMING_SNAKE_CASE : Tuple = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) SCREAMING_SNAKE_CASE : Optional[Any] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} SCREAMING_SNAKE_CASE : Union[str, Any] = "zero2" SCREAMING_SNAKE_CASE : Optional[Any] = "zero3" SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa] def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> Dict: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __lowerCamelCase = parameterized.to_safe_name('''_'''.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test SCREAMING_SNAKE_CASE : List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( __lowercase ): @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ) -> str: self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] = 10 , SCREAMING_SNAKE_CASE__ : Optional[Any] = True , SCREAMING_SNAKE_CASE__ : List[Any] = True , SCREAMING_SNAKE_CASE__ : Union[str, Any] = True , ) -> List[str]: __lowerCamelCase = models[model] __lowerCamelCase = self.run_trainer( stage=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , eval_steps=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) self.do_checks(SCREAMING_SNAKE_CASE__ ) return output_dir def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict = 10 , SCREAMING_SNAKE_CASE__ : Tuple = 1 , SCREAMING_SNAKE_CASE__ : Optional[Any] = True , SCREAMING_SNAKE_CASE__ : List[Any] = True , ) -> str: __lowerCamelCase = self.get_auto_remove_tmp_dir('''./xxx''' , after=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = f''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(SCREAMING_SNAKE_CASE__ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __lowerCamelCase = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() __lowerCamelCase = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] __lowerCamelCase = self.get_launcher(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() ) return output_dir def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> str: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __lowerCamelCase = min(2 , get_gpu_count() ) if distributed else 1 return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
362
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
339
0
def __magic_name__ ( ) -> int: __lowerCamelCase = 0 for i in range(1 , 1001 ): total += i**i return str(_lowerCamelCase )[-10:] if __name__ == "__main__": print(solution())
363
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
0
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
364
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None: if start is None: __lowerCamelCase = 0 if end is None: __lowerCamelCase = len(__lowerCAmelCase ) - 1 if start >= end: return __lowerCamelCase = (start + end) // 2 slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase ) if sequence[end] < sequence[mid]: __lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end] slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
339
0
import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py SCREAMING_SNAKE_CASE__ : Optional[int] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE__ : List[Any] = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE__ : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") SCREAMING_SNAKE_CASE__ : str = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def __magic_name__ ( __lowerCAmelCase : Optional[Any] ) -> Dict: __lowerCamelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def __magic_name__ ( ) -> Dict: __lowerCamelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __lowerCamelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __lowerCamelCase = collections.defaultdict(__a ) __lowerCamelCase = collections.defaultdict(__a ) __lowerCamelCase = collections.defaultdict(__a ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(__a ): __lowerCamelCase = None if _re_tf_models.match(__a ) is not None: __lowerCamelCase = tf_models __lowerCamelCase = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: __lowerCamelCase = flax_models __lowerCamelCase = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: __lowerCamelCase = pt_models __lowerCamelCase = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_prefix_to_model_type: __lowerCamelCase = True break # Try again after removing the last word in the name __lowerCamelCase = ''''''.join(camel_case_split(__a )[:-1] ) __lowerCamelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __lowerCamelCase = list(__a ) all_models.sort() __lowerCamelCase = {'''model_type''': all_models} __lowerCamelCase = [pt_models[t] for t in all_models] __lowerCamelCase = [tf_models[t] for t in all_models] __lowerCamelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __lowerCamelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __lowerCamelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __lowerCamelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __lowerCamelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. __lowerCamelCase = '''AutoTokenizer''' __lowerCamelCase = [processors[t] for t in all_models] return pd.DataFrame(__a ) def __magic_name__ ( __lowerCAmelCase : str ) -> str: __lowerCamelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __lowerCamelCase = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}'''] __lowerCamelCase = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(__a , __a , __a ): # The type of pipeline may not exist in this framework if not hasattr(__a , __a ): continue # First extract all model_names __lowerCamelCase = [] for name in getattr(__a , __a ).values(): if isinstance(__a , __a ): model_names.append(__a ) else: model_names.extend(list(__a ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ) -> Optional[Any]: __lowerCamelCase = get_frameworks_table() __lowerCamelCase = Dataset.from_pandas(__a ) __lowerCamelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=__a ) __lowerCamelCase = Dataset.from_json(__a ) __lowerCamelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(__a ) ) } __lowerCamelCase = update_pipeline_and_auto_class_table(__a ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. __lowerCamelCase = sorted(table.keys() ) __lowerCamelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __lowerCamelCase = Dataset.from_pandas(__a ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(__a , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(__a , '''pipeline_tags.json''' ) ) if commit_sha is not None: __lowerCamelCase = ( f'''Update with commit {commit_sha}\n\nSee: ''' f'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: __lowerCamelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=__a , repo_type='''dataset''' , token=__a , commit_message=__a , ) def __magic_name__ ( ) -> int: __lowerCamelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __lowerCamelCase = transformers_module.pipelines.SUPPORTED_TASKS __lowerCamelCase = [] for key in pipeline_tasks: if key not in in_table: __lowerCamelCase = pipeline_tasks[key]['''pt'''] if isinstance(__a , (list, tuple) ): __lowerCamelCase = model[0] __lowerCamelCase = model.__name__ if model not in in_table.values(): missing.append(__a ) if len(__a ) > 0: __lowerCamelCase = ''', '''.join(__a ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f'''`utils/update_metadata.py`: {msg}. Please add them!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
365
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
339
0
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __magic_name__ ( __lowerCAmelCase : int ) -> str: if "model" in orig_key: __lowerCamelCase = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: __lowerCamelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: __lowerCamelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: __lowerCamelCase = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: __lowerCamelCase = orig_key.split('''.''' )[0].split('''_''' )[-1] __lowerCamelCase = orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: __lowerCamelCase = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: __lowerCamelCase = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: __lowerCamelCase = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: __lowerCamelCase = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: __lowerCamelCase = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: __lowerCamelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: __lowerCamelCase = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: __lowerCamelCase = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: __lowerCamelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: __lowerCamelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: __lowerCamelCase = """yoso.""" + orig_key return orig_key def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ) -> int: for key in orig_state_dict.copy().keys(): __lowerCamelCase = orig_state_dict.pop(lowerCAmelCase__ ) if ("pooler" in key) or ("sen_class" in key): continue else: __lowerCamelCase = val __lowerCamelCase = orig_state_dict["""cls.predictions.decoder.bias"""] __lowerCamelCase = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2 return orig_state_dict def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Any: __lowerCamelCase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )["""model_state_dict"""] __lowerCamelCase = YosoConfig.from_json_file(lowerCAmelCase__ ) __lowerCamelCase = YosoForMaskedLM(lowerCAmelCase__ ) __lowerCamelCase = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase__ ) print(model.load_state_dict(lowerCAmelCase__ ) ) model.eval() model.save_pretrained(lowerCAmelCase__ ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
366
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
0
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class lowerCAmelCase__ : def __init__( self : int , SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]: __lowerCamelCase = parent __lowerCamelCase = 13 __lowerCamelCase = 7 __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = 99 __lowerCamelCase = 32 __lowerCamelCase = 2 __lowerCamelCase = 4 __lowerCamelCase = 37 __lowerCamelCase = "gelu" __lowerCamelCase = 0.1 __lowerCamelCase = 0.1 __lowerCamelCase = 5_12 __lowerCamelCase = 16 __lowerCamelCase = 2 __lowerCamelCase = 0.02 __lowerCamelCase = 3 __lowerCamelCase = 4 __lowerCamelCase = None def __A ( self : Any ) -> int: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple: __lowerCamelCase = TFDistilBertModel(config=lowerCAmelCase__ ) __lowerCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} __lowerCamelCase = model(lowerCAmelCase__ ) __lowerCamelCase = [input_ids, input_mask] __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: __lowerCamelCase = TFDistilBertForMaskedLM(config=lowerCAmelCase__ ) __lowerCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> str: __lowerCamelCase = TFDistilBertForQuestionAnswering(config=lowerCAmelCase__ ) __lowerCamelCase = { "input_ids": input_ids, "attention_mask": input_mask, } __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: __lowerCamelCase = self.num_labels __lowerCamelCase = TFDistilBertForSequenceClassification(lowerCAmelCase__ ) __lowerCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: __lowerCamelCase = self.num_choices __lowerCamelCase = TFDistilBertForMultipleChoice(lowerCAmelCase__ ) __lowerCamelCase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) __lowerCamelCase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) __lowerCamelCase = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]: __lowerCamelCase = self.num_labels __lowerCamelCase = TFDistilBertForTokenClassification(lowerCAmelCase__ ) __lowerCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} __lowerCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : int ) -> int: __lowerCamelCase = self.prepare_config_and_inputs() (__lowerCamelCase) = config_and_inputs __lowerCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ): a__ : Optional[int] = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) a__ : Dict = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) a__ : Any = False a__ : Union[str, Any] = False def __A ( self : str ) -> Dict: __lowerCamelCase = TFDistilBertModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , dim=37 ) def __A ( self : Optional[int] ) -> Any: self.config_tester.run_common_tests() def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ ) def __A ( self : str ) -> Tuple: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ ) def __A ( self : int ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ ) def __A ( self : Dict ) -> List[Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ ) def __A ( self : Dict ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ ) @slow def __A ( self : Any ) -> str: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __lowerCamelCase = TFDistilBertModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : List[str] ) -> Tuple: __lowerCamelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __lowerCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCamelCase = model(lowerCAmelCase__ )[0] __lowerCamelCase = [1, 6, 7_68] self.assertEqual(output.shape , lowerCAmelCase__ ) __lowerCamelCase = tf.constant( [ [ [0.19261885, -0.13732955, 0.4119799], [0.22150156, -0.07422661, 0.39037204], [0.22756018, -0.0896414, 0.3701467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
367
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
339
0
import baseaa def __magic_name__ ( __lowerCAmelCase : str ) -> bytes: return baseaa.aaaencode(string.encode('''utf-8''' ) ) def __magic_name__ ( __lowerCAmelCase : bytes ) -> str: return baseaa.aaadecode(_UpperCamelCase ).decode('''utf-8''' ) if __name__ == "__main__": import doctest doctest.testmod()
368
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
339
0
from functools import reduce SCREAMING_SNAKE_CASE__ : List[Any] = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __magic_name__ ( __lowerCAmelCase : Any = N ) -> List[Any]: return max( # mypy cannot properly interpret reduce int(reduce(lambda __lowerCAmelCase , __lowerCAmelCase : str(int(a__ ) * int(a__ ) ) , n[i : i + 13] ) ) for i in range(len(a__ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
369
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
339
0
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py SCREAMING_SNAKE_CASE__ : List[str] = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) SCREAMING_SNAKE_CASE__ : List[str] = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ) -> List[str]: __lowerCamelCase = SavedModel() __lowerCamelCase = [] with open(os.path.join(__SCREAMING_SNAKE_CASE , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f: __lowerCamelCase = json.load(__SCREAMING_SNAKE_CASE )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(__SCREAMING_SNAKE_CASE )] ) with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f: saved_model.ParseFromString(f.read() ) __lowerCamelCase = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want __lowerCamelCase = sorted(__SCREAMING_SNAKE_CASE ) __lowerCamelCase = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__SCREAMING_SNAKE_CASE ) if strict and len(__SCREAMING_SNAKE_CASE ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(__SCREAMING_SNAKE_CASE ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*__SCREAMING_SNAKE_CASE , sep='''\n''' ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
370
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool: return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
0
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __magic_name__ ( __lowerCAmelCase : Any="" ) -> str: __lowerCamelCase = tempfile.mkdtemp() return os.path.join(UpperCAmelCase__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[Any] ) -> str: __lowerCamelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 __lowerCamelCase = AgentAudio(lowercase_ ) __lowerCamelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowercase_ ) ) # Ensure that the file contains the same value as the original tensor __lowerCamelCase = sf.read(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1e-4 ) ) def __A ( self : int ) -> Any: __lowerCamelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 __lowerCamelCase = get_new_path(suffix='''.wav''' ) sf.write(lowercase_ , lowercase_ , 1_60_00 ) __lowerCamelCase = AgentAudio(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , lowercase_ ) @require_vision @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> List[Any]: __lowerCamelCase = torch.randint(0 , 2_56 , (64, 64, 3) ) __lowerCamelCase = AgentImage(lowercase_ ) __lowerCamelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / """000000039769.png""" __lowerCamelCase = Image.open(lowercase_ ) __lowerCamelCase = AgentImage(lowercase_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / """000000039769.png""" __lowerCamelCase = Image.open(lowercase_ ) __lowerCamelCase = AgentImage(lowercase_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = """Hey!""" __lowerCamelCase = AgentText(lowercase_ ) self.assertEqual(lowercase_ , agent_type.to_string() ) self.assertEqual(lowercase_ , agent_type.to_raw() ) self.assertEqual(lowercase_ , lowercase_ )
371
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Dict = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
0
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Any: __lowerCamelCase = parent def __A ( self : List[Any] ) -> Tuple: return {} def __magic_name__ ( ) -> Optional[Any]: __lowerCamelCase = '''<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>''' __lowerCamelCase = '''\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n ''' return [html_string_a, html_string_a] @require_bsa class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Union[str, Any] = MarkupLMFeatureExtractor if is_bsa_available() else None def __A ( self : Tuple ) -> List[str]: __lowerCamelCase = MarkupLMFeatureExtractionTester(self ) @property def __A ( self : List[str] ) -> Optional[int]: return self.feature_extract_tester.prepare_feat_extract_dict() def __A ( self : str ) -> Dict: # Initialize feature_extractor __lowerCamelCase = self.feature_extraction_class() # Test not batched input __lowerCamelCase = get_html_strings()[0] __lowerCamelCase = feature_extractor(_a ) # fmt: off __lowerCamelCase = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] __lowerCamelCase = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , _a ) self.assertEqual(encoding.xpaths , _a ) # Test batched __lowerCamelCase = get_html_strings() __lowerCamelCase = feature_extractor(_a ) # fmt: off __lowerCamelCase = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] __lowerCamelCase = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , _a ) self.assertEqual(encoding.xpaths , _a )
350
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. __lowerCamelCase , __lowerCamelCase = y, x % y return abs(__lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: try: __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __lowerCamelCase = int(nums[0] ) __lowerCamelCase = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
339
0
"""simple docstring""" import math import sys def __magic_name__ ( __lowerCAmelCase : str ) -> str: __lowerCamelCase = "" try: with open(__lowerCamelCase , '''rb''' ) as binary_file: __lowerCamelCase = binary_file.read() for dat in data: __lowerCamelCase = f'''{dat:08b}''' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def __magic_name__ ( __lowerCAmelCase : int ) -> Optional[Any]: __lowerCamelCase = {"0": "0", "1": "1"} __lowerCamelCase = "", "" __lowerCamelCase = len(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowerCamelCase = lexicon[curr_string] result += last_match_id __lowerCamelCase = last_match_id + "0" if math.loga(__lowerCamelCase ).is_integer(): __lowerCamelCase = {} for curr_key in list(__lowerCamelCase ): __lowerCamelCase = lexicon.pop(__lowerCamelCase ) __lowerCamelCase = new_lex __lowerCamelCase = last_match_id + "1" index += 1 __lowerCamelCase = "" return result def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ) -> str: __lowerCamelCase = 8 try: with open(__lowerCamelCase , '''wb''' ) as opened_file: __lowerCamelCase = [ to_write[i : i + byte_length] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(__lowerCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def __magic_name__ ( __lowerCAmelCase : str ) -> int: __lowerCamelCase = 0 for letter in data_bits: if letter == "1": break counter += 1 __lowerCamelCase = data_bits[counter:] __lowerCamelCase = data_bits[counter + 1 :] return data_bits def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ) -> Any: __lowerCamelCase = read_file_binary(__lowerCamelCase ) __lowerCamelCase = remove_prefix(__lowerCamelCase ) __lowerCamelCase = decompress_data(__lowerCamelCase ) write_file_binary(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
351
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
0
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def __magic_name__ ( __lowerCAmelCase : str ) -> Optional[int]: return EnvironmentCommand() def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Optional[Any]: return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase__ ( __snake_case ): @staticmethod def __A ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: __lowerCamelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=a_ ) download_parser.add_argument( '''--accelerate-config_file''' , default=a_ , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=a_ ) def __init__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: __lowerCamelCase = accelerate_config_file def __A ( self : Optional[Any] ) -> Dict: __lowerCamelCase = '''not installed''' if is_safetensors_available(): import safetensors __lowerCamelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors __lowerCamelCase = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __lowerCamelCase = '''not installed''' __lowerCamelCase = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __lowerCamelCase = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(a_ ): __lowerCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict() __lowerCamelCase = ( '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(a_ , a_ ) else f'''\t{accelerate_config}''' ) __lowerCamelCase = '''not installed''' __lowerCamelCase = '''NA''' if is_torch_available(): import torch __lowerCamelCase = torch.__version__ __lowerCamelCase = torch.cuda.is_available() __lowerCamelCase = '''not installed''' __lowerCamelCase = '''NA''' if is_tf_available(): import tensorflow as tf __lowerCamelCase = tf.__version__ try: # deprecated in v2.1 __lowerCamelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __lowerCamelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) __lowerCamelCase = '''not installed''' __lowerCamelCase = '''not installed''' __lowerCamelCase = '''not installed''' __lowerCamelCase = '''NA''' if is_flax_available(): import flax import jax import jaxlib __lowerCamelCase = flax.__version__ __lowerCamelCase = jax.__version__ __lowerCamelCase = jaxlib.__version__ __lowerCamelCase = jax.lib.xla_bridge.get_backend().platform __lowerCamelCase = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': f'''{safetensors_version}''', '''Accelerate version''': f'''{accelerate_version}''', '''Accelerate config''': f'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''', '''Jax version''': f'''{jax_version}''', '''JaxLib version''': f'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(a_ ) ) return info @staticmethod def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
352
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
0
import operator def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] = False , __lowerCAmelCase : Any = None ) -> Optional[Any]: __lowerCamelCase = operator.lt if reverse else operator.gt __lowerCamelCase = solution or [] if not arr: return solution __lowerCamelCase = [arr.pop(0 )] for i, item in enumerate(_A ): if _operator(_A , sublist[-1] ): sublist.append(_A ) arr.pop(_A ) # merging sublist into solution list if not solution: solution.extend(_A ) else: while sublist: __lowerCamelCase = sublist.pop(0 ) for i, xx in enumerate(_A ): if not _operator(_A , _A ): solution.insert(_A , _A ) break else: solution.append(_A ) strand_sort(_A , _A , _A ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
353
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
339
0
from math import ceil, sqrt def __magic_name__ ( __lowerCAmelCase : int = 100_0000 ) -> int: __lowerCamelCase = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: __lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: __lowerCamelCase = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F'{solution() = }')
354
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
339
0
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL SCREAMING_SNAKE_CASE__ : Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : tuple , __lowerCAmelCase : Path , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=False , ) -> Tuple: output_path.parent.mkdir(parents=snake_case_ , exist_ok=snake_case_ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , use_external_data_format=snake_case_ , enable_onnx_checker=snake_case_ , opset_version=snake_case_ , ) else: export( snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , opset_version=snake_case_ , ) @torch.no_grad() def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : bool = False ) -> List[str]: __lowerCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __lowerCamelCase = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: __lowerCamelCase = """cpu""" __lowerCamelCase = Path(snake_case_ ) # VAE DECODER __lowerCamelCase = AutoencoderKL.from_pretrained(model_path + '''/vae''' ) __lowerCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part __lowerCamelCase = vae_decoder.decode onnx_export( snake_case_ , model_args=( torch.randn(1 , snake_case_ , 25 , 25 ).to(device=snake_case_ , dtype=snake_case_ ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=snake_case_ , ) del vae_decoder if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") SCREAMING_SNAKE_CASE__ : Any = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
355
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
0
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata SCREAMING_SNAKE_CASE__ : List[str] = "" if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class lowerCAmelCase__ ( tr.AbstractTransform ): def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str = " " ) -> Optional[Any]: __lowerCamelCase = sentence_delimiter def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: return list(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]: __lowerCamelCase = [] for sent_idx, sentence in enumerate(SCREAMING_SNAKE_CASE__ ): chars.extend(self.process_string(SCREAMING_SNAKE_CASE__ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(SCREAMING_SNAKE_CASE__ ) - 1: chars.append(self.sentence_delimiter ) return chars SCREAMING_SNAKE_CASE__ : Dict = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: SCREAMING_SNAKE_CASE__ : List[str] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) SCREAMING_SNAKE_CASE__ : Optional[int] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" SCREAMING_SNAKE_CASE__ : Dict = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" SCREAMING_SNAKE_CASE__ : Tuple = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __A ( self : Optional[int] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Optional[Any]: if concatenate_texts: return jiwer.compute_measures( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , truth_transform=SCREAMING_SNAKE_CASE__ , hypothesis_transform=SCREAMING_SNAKE_CASE__ , )["wer"] __lowerCamelCase = 0 __lowerCamelCase = 0 for prediction, reference in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = jiwer.compute_measures( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , truth_transform=SCREAMING_SNAKE_CASE__ , hypothesis_transform=SCREAMING_SNAKE_CASE__ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
356
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
0