| column                    | dtype  | observed range                |
|---------------------------|--------|-------------------------------|
| `code`                    | string | lengths 87 to 55.2k characters |
| `code_codestyle`          | int64  | 0 to 349                      |
| `style_context`           | string | lengths 135 to 49.1k characters |
| `style_context_codestyle` | int64  | 0 to 349                      |
| `label`                   | int64  | 0 to 1                        |
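Each row pairs a `code` string with a `style_context` string, tags each with a style-class id (`code_codestyle` and `style_context_codestyle`, both in 0 to 349), and carries a binary `label`. In every row previewed below the two style ids differ and `label` is 0, which is consistent with `label` marking whether the two samples share a style class, though the preview alone cannot confirm that. As a minimal sketch of consuming such a dump, assuming it is backed by a Hugging Face dataset (the dataset id below is a placeholder, not the real source):

```python
# Hypothetical loading sketch: "user/code-style-pairs" is a placeholder id,
# and the label semantics checked below are inferred from this preview only.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# In the rows previewed here, code_codestyle != style_context_codestyle
# always coincides with label == 0.
same_style = int(row["code_codestyle"] == row["style_context_codestyle"])
print(same_style, row["label"])
```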
code:

```python
'''simple docstring'''
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class _lowercase ( _lowercase , _lowercase ):
    @register_to_config
    def __init__( self: Optional[Any] , *, UpperCamelCase__: int = 4 , UpperCamelCase__: int = 768 , UpperCamelCase__: int , UpperCamelCase__: int , ):
        super().__init__()
        lowerCamelCase__ : Dict = nn.Parameter(torch.zeros(UpperCamelCase__ ) )

        # parameters for additional clip time embeddings
        lowerCamelCase__ : int = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )

        # parameters for encoder hidden states
        lowerCamelCase__ : Optional[int] = clip_extra_context_tokens
        lowerCamelCase__ : Optional[int] = nn.Linear(
            UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
        lowerCamelCase__ : List[Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = nn.LayerNorm(UpperCamelCase__ )

    def lowerCamelCase_ ( self: int , *, UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            lowerCamelCase__ : Dict = image_embeddings.shape[0]
            lowerCamelCase__ : Optional[int] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            lowerCamelCase__ : List[str] = classifier_free_guidance_embeddings.expand(
                UpperCamelCase__ , -1 )
            lowerCamelCase__ : str = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        lowerCamelCase__ : Dict = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        lowerCamelCase__ : List[Any] = self.embedding_proj(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ )
        lowerCamelCase__ : Dict = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        lowerCamelCase__ : List[str] = self.clip_extra_context_tokens_proj(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens )
        lowerCamelCase__ : int = clip_extra_context_tokens.permute(0 , 2 , 1 )

        lowerCamelCase__ : Optional[int] = self.encoder_hidden_states_proj(UpperCamelCase__ )
        lowerCamelCase__ : str = self.text_encoder_hidden_states_norm(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
```
code_codestyle: 41
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): lowercase__ : List[str] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__lowerCamelCase ): # looping through rows of graph array for i in range(__lowerCamelCase ): # looping through columns of graph array for j in range(__lowerCamelCase ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowercase__ : str = dist[i][k] + dist[k][j] _print_dist(__lowerCamelCase , __lowerCamelCase ) return dist, v if __name__ == "__main__": lowerCAmelCase_ = int(input('Enter number of vertices: ')) lowerCAmelCase_ = int(input('Enter number of edges: ')) lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowerCAmelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowerCAmelCase_ = int(input('Enter source:')) lowerCAmelCase_ = int(input('Enter destination:')) lowerCAmelCase_ = float(input('Enter weight:')) lowerCAmelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
style_context_codestyle: 16
label: 0
code:

```python
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
lowercase : List[Any] = logging.getLogger()


def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Union[str, Any]:
    _snake_case = '\n'.join(__A )
    Path(__A ).open('w' ).writelines(__A )


lowercase : List[Any] = "patrickvonplaten/t5-tiny-random"
lowercase : Optional[int] = "sshleifer/bart-tiny-random"
lowercase : Dict = "sshleifer/tiny-mbart"

lowercase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class __UpperCAmelCase ( _lowerCamelCase ):
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _snake_case = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _snake_case = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(lowerCAmelCase_ , lowerCAmelCase_ )

        _snake_case = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        _snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _snake_case = F'\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        '.split()

        with patch.object(lowerCAmelCase_ , 'argv' , lowerCAmelCase_ ):
            run_generate()
            assert Path(lowerCAmelCase_ ).exists()
            # os.remove(Path(output_file_name))

    def lowerCamelCase ( self ):
        """simple docstring"""
        self.run_eval_tester(lowerCAmelCase_ )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        self.run_eval_tester(lowerCAmelCase_ )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _snake_case = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()

        _snake_case = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }

        _snake_case = Path(self.get_auto_remove_tmp_dir() )
        _snake_case = str(tmp_dir / 'scores.json' )
        _snake_case = str(tmp_dir / 'val.target' )
        _dump_articles(lowerCAmelCase_ , text['en'] )
        _dump_articles(lowerCAmelCase_ , text['de'] )
        _snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _snake_case = F'\n            run_eval_search.py\n            {model}\n            {str(lowerCAmelCase_ )}\n            {str(lowerCAmelCase_ )}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )

        with patch.object(lowerCAmelCase_ , 'argv' , lowerCAmelCase_ ):
            with CaptureStdout() as cs:
                run_search()
            _snake_case = [' num_beams | length_penalty', model, 'Best score args']
            _snake_case = ['Info']
            if "translation" in task:
                expected_strings.append('bleu' )
            else:
                expected_strings.extend(lowerCAmelCase_ )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(lowerCAmelCase_ ).exists()
            os.remove(Path(lowerCAmelCase_ ) )
```
code_codestyle: 42
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
style_context_codestyle: 16
label: 0
code:

```python
import glob
import os
import random
from string import ascii_lowercase, digits

import cva
import numpy as np

# Parrameters
__lowercase = (720, 1280)  # Height, Width
__lowercase = (0.4, 0.6)  # if height or width lower than this scale, drop it.
__lowercase = 1 / 100
__lowercase = ''''''
__lowercase = ''''''
__lowercase = ''''''
__lowercase = 250


def lowerCamelCase ( ):
    '''simple docstring'''
    __UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    for index in range(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCamelCase :List[Any] = random_chars(32 )
        __UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        __UpperCamelCase :Optional[Any] = []
        for anno in new_annos:
            __UpperCamelCase :int = anno[3] - anno[1]
            __UpperCamelCase :Optional[int] = anno[4] - anno[2]
            __UpperCamelCase :int = anno[1] + width / 2
            __UpperCamelCase :List[str] = anno[2] + height / 2
            __UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(SCREAMING_SNAKE_CASE )
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    __UpperCamelCase :str = []
    __UpperCamelCase :str = []
    for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
        __UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(SCREAMING_SNAKE_CASE ) as in_file:
            __UpperCamelCase :str = in_file.readlines()
        __UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
        __UpperCamelCase :int = []
        for obj_list in obj_lists:
            __UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
            __UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
            __UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
            __UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
            __UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(SCREAMING_SNAKE_CASE )
        labels.append(SCREAMING_SNAKE_CASE )
    return img_paths, labels


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , ):
    '''simple docstring'''
    __UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
    __UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
    __UpperCamelCase :Any = int(scale_y * output_size[0] )

    __UpperCamelCase :List[str] = []
    __UpperCamelCase :Dict = []
    for i, index in enumerate(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase :Any = all_img_list[index]
        path_list.append(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :Any = all_annos[index]
        __UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
        if i == 0:  # top-left
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
            __UpperCamelCase :Union[str, Any] = img
            for bbox in img_annos:
                __UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
                __UpperCamelCase :Optional[Any] = bbox[2] * scale_y
                __UpperCamelCase :int = bbox[3] * scale_x
                __UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
            __UpperCamelCase :List[str] = img
            for bbox in img_annos:
                __UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
                __UpperCamelCase :Dict = bbox[2] * scale_y
                __UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __UpperCamelCase :List[Any] = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
            __UpperCamelCase :Optional[int] = img
            for bbox in img_annos:
                __UpperCamelCase :Tuple = bbox[1] * scale_x
                __UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
                __UpperCamelCase :Tuple = bbox[3] * scale_x
                __UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            __UpperCamelCase :Optional[int] = cva.resize(
                SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            __UpperCamelCase :Optional[int] = img
            for bbox in img_annos:
                __UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
                __UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
                __UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        __UpperCamelCase :List[Any] = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    assert number_char > 1, "The number of character should greater than 1"
    __UpperCamelCase :Optional[Any] = ascii_lowercase + digits
    return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )


if __name__ == "__main__":
    main()
    print('''DONE ✅''')
```
code_codestyle: 43
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
style_context_codestyle: 16
label: 0
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _a : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Optional[int] = ReformerTokenizer _UpperCamelCase : Tuple = ReformerTokenizerFast _UpperCamelCase : Tuple = True _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = True def __A ( self ): super().setUp() _lowerCAmelCase : Any = ReformerTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self ): _lowerCAmelCase : Optional[int] = """<s>""" _lowerCAmelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def __A ( self ): _lowerCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(a__ ) , 1000 ) def __A ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __A ( self ): if not self.test_rust_tokenizer: return _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : Dict = self.get_rust_tokenizer() _lowerCAmelCase : Any = """I was born in 92000, and this is falsé.""" _lowerCAmelCase : Any = tokenizer.tokenize(a__ ) _lowerCAmelCase : Dict = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) _lowerCAmelCase : int = tokenizer.encode(a__ , add_special_tokens=a__ ) _lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) _lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() _lowerCAmelCase : str = tokenizer.encode(a__ ) _lowerCAmelCase : Dict = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) def __A ( self , a__=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): _lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) # Simple input _lowerCAmelCase : List[str] = """This is a simple input""" _lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""] _lowerCAmelCase : List[Any] = ("""This is a simple input""", """This is a pair""") _lowerCAmelCase : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" ) # Simple input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" ) # Simple input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , ) # Pair input self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" ) # Pair input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" ) # Pair input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , ) def __A ( 
self ): pass def __A ( self ): _lowerCAmelCase : Optional[Any] = ReformerTokenizer(a__ , keep_accents=a__ ) _lowerCAmelCase : Any = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [285, 46, 10, 170, 382] , ) _lowerCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def __A ( self ): return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" ) @slow def __A ( self ): _lowerCAmelCase : Union[str, Any] = """Hello World!""" _lowerCAmelCase : Tuple = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __A ( self ): _lowerCAmelCase : Dict = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) _lowerCAmelCase : Union[str, Any] = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @require_torch @slow def __A ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence _lowerCAmelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] _lowerCAmelCase : Dict = """ """.join(a__ ) _lowerCAmelCase : List[str] = self.big_tokenizer.encode_plus(a__ , return_tensors="""pt""" ) _lowerCAmelCase : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" ) _lowerCAmelCase : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) _lowerCAmelCase : Optional[int] = encoded_sequence["""input_ids"""].shape _lowerCAmelCase : Any = ReformerModel(a__ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a__ ) model(**a__ ) @slow def __A ( self ): # fmt: off _lowerCAmelCase : Dict = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 _lowerCAmelCase : str = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=a__ , sequences=a__ , )
code_codestyle: 44
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = 
AutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : List[Any] = 
AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
style_context_codestyle: 16
label: 0
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase_ = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[str]=None , ) -> Optional[int]: if attention_mask is None: __a = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __a = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __a = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __a = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __a = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __lowerCAmelCase : '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=32 , _a=2 , _a=1 , _a=0 , _a=0.02 , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id __a = initializer_range def __UpperCAmelCase ( self ): __a = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __a = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __a = shift_tokens_right(_a , 1 , 2 ) __a = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_a , ) __a = prepare_blenderbot_inputs_dict(_a , _a , _a ) return config, inputs_dict def __UpperCAmelCase ( self ): __a , __a = self.prepare_config_and_inputs() return config, inputs_dict def 
__UpperCAmelCase ( self , _a , _a , _a ): __a = 20 __a = model_class_name(_a ) __a = model.encode(inputs_dict['''input_ids'''] ) __a , __a = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __a = model.init_cache(decoder_input_ids.shape[0] , _a , _a ) __a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) __a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a = model.decode( decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , ) __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __a = model.decode( decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , ) __a = model.decode(_a , _a ) __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( self , _a , _a , _a ): __a = 20 __a = model_class_name(_a ) __a = model.encode(inputs_dict['''input_ids'''] ) __a , __a = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __a = model.init_cache(decoder_input_ids.shape[0] , _a , _a ) __a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a = model.decode( decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , ) __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __a = model.decode( decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , ) __a = model.decode(_a , _a , decoder_attention_mask=_a ) __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str = 9_9 def __UpperCAmelCase ( self ): __a = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __a = input_ids.shape[0] __a = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __UpperCAmelCase ( self ): __a , __a , __a = self._get_config_and_data() __a = FlaxBlenderbotForConditionalGeneration(_a ) __a = lm_model(input_ids=_a ) __a = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _a ) def __UpperCAmelCase ( self ): __a = BlenderbotConfig( vocab_size=self.vocab_size , 
d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __a = FlaxBlenderbotForConditionalGeneration(_a ) __a = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __a = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __a = lm_model(input_ids=_a , decoder_input_ids=_a ) __a = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _a ) def __UpperCAmelCase ( self ): __a = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __a = shift_tokens_right(_a , 1 , 2 ) __a = np.equal(_a , 1 ).astype(np.floataa ).sum() __a = np.equal(_a , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_a , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Dict = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCAmelCase : List[Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def __UpperCAmelCase ( self ): __a = FlaxBlenderbotModelTester(self ) def __UpperCAmelCase ( self ): __a , __a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_a , _a , _a ) def __UpperCAmelCase ( self ): __a , __a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a ) def __UpperCAmelCase ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a = self._prepare_for_class(_a , _a ) __a = model_class(_a ) @jax.jit def encode_jitted(_a , _a=None , **_a ): return model.encode(input_ids=_a , attention_mask=_a ) with self.subTest('''JIT Enabled''' ): __a = encode_jitted(**_a ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __a = encode_jitted(**_a ).to_tuple() self.assertEqual(len(_a ) , len(_a ) ) for jitted_output, output in zip(_a , _a ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCAmelCase ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a = model_class(_a ) __a = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) __a = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_a , _a , _a ): return model.decode( decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , ) with self.subTest('''JIT Enabled''' ): __a = decode_jitted(**_a ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __a = decode_jitted(**_a ).to_tuple() self.assertEqual(len(_a ) , len(_a ) ) for jitted_output, output in zip(_a , _a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __UpperCAmelCase ( self ): for model_class_name in self.all_model_classes: __a = 
model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __a = np.ones((1, 1) ) * model.config.eos_token_id __a = model(_a ) self.assertIsNotNone(_a ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def __UpperCAmelCase ( self ): __a = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} __a = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} __a = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_a ) __a = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) __a = ['''Sam'''] __a = tokenizer(_a , return_tensors='''jax''' ) __a = model.generate(**_a , **_a ) __a = '''Sam is a great name. It means "sun" in Gaelic.''' __a = tokenizer.batch_decode(_a , **_a ) assert generated_txt[0].strip() == tgt_text
code_codestyle: 45
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int: lowercase__ : int = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 16
label: 0
"""simple docstring""" import os import sys SCREAMING_SNAKE_CASE__ = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) SCREAMING_SNAKE_CASE__ = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return AutoConfig.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoTokenizer.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModel.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' return AutoModel.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
code_codestyle: 46
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" debug_launcher(test_ops.main )
style_context_codestyle: 16
label: 0
code:

```python
'''simple docstring'''
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class A__ :
    def __init__( self : str , _a : List[Any] , _a : Optional[Any]=13 , _a : Tuple=7 , _a : int=True , _a : Tuple=True , _a : Any=False , _a : Tuple=True , _a : Optional[int]=99 , _a : List[Any]=32 , _a : Dict=5 , _a : List[str]=4 , _a : str=37 , _a : Dict="gelu" , _a : Optional[int]=0.1 , _a : int=0.1 , _a : List[Any]=512 , _a : int=16 , _a : List[str]=2 , _a : Union[str, Any]=0.02 , _a : Any=3 , _a : str=4 , _a : str=None , ) -> List[Any]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =parent
        _SCREAMING_SNAKE_CASE =batch_size
        _SCREAMING_SNAKE_CASE =seq_length
        _SCREAMING_SNAKE_CASE =is_training
        _SCREAMING_SNAKE_CASE =use_input_mask
        _SCREAMING_SNAKE_CASE =use_token_type_ids
        _SCREAMING_SNAKE_CASE =use_labels
        _SCREAMING_SNAKE_CASE =vocab_size
        _SCREAMING_SNAKE_CASE =hidden_size
        _SCREAMING_SNAKE_CASE =num_hidden_layers
        _SCREAMING_SNAKE_CASE =num_attention_heads
        _SCREAMING_SNAKE_CASE =intermediate_size
        _SCREAMING_SNAKE_CASE =hidden_act
        _SCREAMING_SNAKE_CASE =hidden_dropout_prob
        _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE =max_position_embeddings
        _SCREAMING_SNAKE_CASE =type_vocab_size
        _SCREAMING_SNAKE_CASE =type_sequence_label_size
        _SCREAMING_SNAKE_CASE =initializer_range
        _SCREAMING_SNAKE_CASE =num_labels
        _SCREAMING_SNAKE_CASE =num_choices
        _SCREAMING_SNAKE_CASE =scope

    def A ( self : Optional[Any] ) -> Dict:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _SCREAMING_SNAKE_CASE =None
        if self.use_input_mask:
            _SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )

        _SCREAMING_SNAKE_CASE =None
        if self.use_token_type_ids:
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _SCREAMING_SNAKE_CASE =None
        _SCREAMING_SNAKE_CASE =None
        _SCREAMING_SNAKE_CASE =None
        if self.use_labels:
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )

        _SCREAMING_SNAKE_CASE =self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A ( self : Optional[int] ) -> Any:
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )

    def A ( self : Tuple , _a : str , _a : str , _a : Optional[int] , _a : List[str] , _a : Dict , _a : Union[str, Any] , _a : int ) -> Any:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptModel(config=_a )
        model.to(_a )
        model.eval()

        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )
        _SCREAMING_SNAKE_CASE =model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A ( self : Union[str, Any] , _a : List[str] , _a : Optional[Any] , _a : str , _a : Optional[int] , _a : Optional[Any] , _a : Dict , _a : Optional[int] , _a : List[Any] , _a : str , ) -> Dict:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptForCausalLM(config=_a )
        model.to(_a )
        model.eval()

        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A ( self : int , _a : Dict , _a : Tuple , _a : List[str] , _a : str , _a : Optional[int] , *_a : str ) -> List[Any]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptModel(config=_a )
        model.to(_a )
        model.eval()

        # create attention mask
        _SCREAMING_SNAKE_CASE =torch.ones(input_ids.shape , dtype=torch.long , device=_a )
        _SCREAMING_SNAKE_CASE =self.seq_length // 2
        _SCREAMING_SNAKE_CASE =0

        # first forward pass
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a ).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        _SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 1) , config.vocab_size )

        # change a random masked slice from input_ids
        _SCREAMING_SNAKE_CASE =ids_tensor((1,) , _a ).item() + 1
        _SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        _SCREAMING_SNAKE_CASE =random_other_next_tokens

        # append to next input_ids and attn_mask
        _SCREAMING_SNAKE_CASE =torch.cat([input_ids, next_tokens] , dim=-1 )
        _SCREAMING_SNAKE_CASE =torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , )

        # get two different outputs
        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )['last_hidden_state']
        _SCREAMING_SNAKE_CASE =model(_a , past_key_values=_a , attention_mask=_a )['last_hidden_state']

        # select random slice
        _SCREAMING_SNAKE_CASE =ids_tensor((1,) , output_from_past.shape[-1] ).item()
        _SCREAMING_SNAKE_CASE =output_from_no_past[:, -1, random_slice_idx].detach()
        _SCREAMING_SNAKE_CASE =output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )

    def A ( self : Any , _a : Dict , _a : List[Any] , _a : str , _a : List[str] , _a : Any , *_a : List[str] ) -> str:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptModel(config=_a ).to(_a ).eval()

        _SCREAMING_SNAKE_CASE =torch.ones(input_ids.shape , dtype=torch.long , device=_a )

        # first forward pass
        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , use_cache=_a )

        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        _SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 3) , config.vocab_size )
        _SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 3) , 2 )

        # append to next input_ids and
        _SCREAMING_SNAKE_CASE =torch.cat([input_ids, next_tokens] , dim=-1 )
        _SCREAMING_SNAKE_CASE =torch.cat([attention_mask, next_attn_mask] , dim=-1 )

        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )['last_hidden_state']
        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , past_key_values=_a )[
            'last_hidden_state'
        ]

        # select random slice
        _SCREAMING_SNAKE_CASE =ids_tensor((1,) , output_from_past.shape[-1] ).item()
        _SCREAMING_SNAKE_CASE =output_from_no_past[:, -3:, random_slice_idx].detach()
        _SCREAMING_SNAKE_CASE =output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )

    def A ( self : Optional[int] , _a : Dict , _a : Any , _a : List[Any] , _a : Tuple , _a : Dict , *_a : Optional[Any] , _a : int=False ) -> Any:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptForCausalLM(_a )
        model.to(_a )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        _SCREAMING_SNAKE_CASE =model(_a , labels=_a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    def A ( self : int , _a : Dict , *_a : str ) -> Union[str, Any]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptModel(_a )
        _SCREAMING_SNAKE_CASE =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )

    def A ( self : Optional[int] , _a : Dict , _a : int , _a : Tuple , _a : Union[str, Any] , _a : Optional[int] , *_a : int ) -> str:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =self.num_labels
        _SCREAMING_SNAKE_CASE =BioGptForTokenClassification(_a )
        model.to(_a )
        model.eval()

        _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
        (
            (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) ,
        ) =config_and_inputs
        _SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}

        return config, inputs_dict


@require_torch
class A__ ( A__ , A__ , A__ , unittest.TestCase ):
    A__ = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    A__ = (BioGptForCausalLM,) if is_torch_available() else ()
    A__ = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    A__ = False

    def A ( self : Optional[int] ) -> List[str]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =BioGptModelTester(self )
        _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )

    def A ( self : str ) -> Tuple:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def A ( self : Optional[int] ) -> List[Any]:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def A ( self : Tuple ) -> Tuple:
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
```
_SCREAMING_SNAKE_CASE =type self.model_tester.create_and_check_model(*_a ) def A ( self : Dict ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_a ) def A ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_a , gradient_checkpointing=_a ) def A ( self : Any ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_a ) def A ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_a ) def A ( self : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_a ) @slow def A ( self : List[str] ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(_a ) _SCREAMING_SNAKE_CASE =BioGptTokenizer.from_pretrained('microsoft/biogpt' ) _SCREAMING_SNAKE_CASE ='left' # Define PAD Token = EOS Token = 50256 _SCREAMING_SNAKE_CASE =tokenizer.eos_token _SCREAMING_SNAKE_CASE =model.config.eos_token_id # use different length sentences to test batching _SCREAMING_SNAKE_CASE =[ 'Hello, my dog is a little', 'Today, I', ] _SCREAMING_SNAKE_CASE =tokenizer(_a , return_tensors='pt' , padding=_a ) _SCREAMING_SNAKE_CASE =inputs['input_ids'].to(_a ) _SCREAMING_SNAKE_CASE =model.generate( input_ids=_a , attention_mask=inputs['attention_mask'].to(_a ) , ) _SCREAMING_SNAKE_CASE =tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(_a ) _SCREAMING_SNAKE_CASE =model.generate(input_ids=_a ) _SCREAMING_SNAKE_CASE =inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() _SCREAMING_SNAKE_CASE =tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(_a ) _SCREAMING_SNAKE_CASE =model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings ) _SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a , skip_special_tokens=_a ) _SCREAMING_SNAKE_CASE =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a ) _SCREAMING_SNAKE_CASE =tokenizer.decode(output_padded[0] , skip_special_tokens=_a ) _SCREAMING_SNAKE_CASE =[ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , [non_padded_sentence, padded_sentence] ) @slow def A ( self : str ) -> Any: '''simple docstring''' for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE =BioGptModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def A ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE =3 _SCREAMING_SNAKE_CASE =input_dict['input_ids'] _SCREAMING_SNAKE_CASE =input_ids.ne(1 ).to(_a ) _SCREAMING_SNAKE_CASE =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _SCREAMING_SNAKE_CASE =BioGptForSequenceClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a ) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : Tuple ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE =3 _SCREAMING_SNAKE_CASE ='multi_label_classification' _SCREAMING_SNAKE_CASE =input_dict['input_ids'] _SCREAMING_SNAKE_CASE =input_ids.ne(1 ).to(_a ) _SCREAMING_SNAKE_CASE =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _SCREAMING_SNAKE_CASE =BioGptForSequenceClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class A__ ( unittest.TestCase ): @slow def A ( self : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) _SCREAMING_SNAKE_CASE =torch.tensor([[2, 4805, 9, 656, 21]] ) _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =4_2384 _SCREAMING_SNAKE_CASE =torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =BioGptTokenizer.from_pretrained('microsoft/biogpt' ) _SCREAMING_SNAKE_CASE =BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(_a ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =tokenizer('COVID-19 is' , return_tensors='pt' ).to(_a ) _SCREAMING_SNAKE_CASE =model.generate( **_a , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_a , ) _SCREAMING_SNAKE_CASE =tokenizer.decode(output_ids[0] , skip_special_tokens=_a ) _SCREAMING_SNAKE_CASE =( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(_a , _a )
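# The batched-generation test above leans on one decoder-only idiom: pad on the
# left so the last position of every row is real text. A minimal standalone
# sketch of that pattern (model name comes from the test; the generation length
# is an illustrative assumption):
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer


def generate_batched(sentences, model_name="microsoft/biogpt", max_new_tokens=20):
    tokenizer = BioGptTokenizer.from_pretrained(model_name)
    model = BioGptForCausalLM.from_pretrained(model_name)
    tokenizer.padding_side = "left"  # decoder-only models must pad on the left
    tokenizer.pad_token = tokenizer.eos_token  # reuse EOS, BioGPT has no pad token
    model.config.pad_token_id = model.config.eos_token_id
    inputs = tokenizer(sentences, return_tensors="pt", padding=True)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)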
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = XLNetTokenizer lowerCamelCase_ : Union[str, Any] = XLNetTokenizerFast lowerCamelCase_ : Optional[int] = True lowerCamelCase_ : Optional[int] = True def _lowercase ( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self ) -> str: lowerCamelCase : str = "<s>" lowerCamelCase : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def _lowercase ( self ) -> Any: lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(UpperCamelCase__ ) , 1006 ) def _lowercase ( self ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : int = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) lowerCamelCase : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] ) lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def _lowercase ( self ) -> Any: lowerCamelCase : Union[str, Any] = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) lowerCamelCase : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def _lowercase ( self ) -> int: lowerCamelCase : str = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) lowerCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def _lowercase ( self ) -> Tuple: lowerCamelCase : List[Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) lowerCamelCase : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ ) lowerCamelCase : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ) lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _lowercase ( self ) -> Union[str, Any]: # fmt: off lowerCamelCase : str = {"input_ids": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
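# Every assertion in the tokenizer test above is one SentencePiece round trip:
# text -> subword pieces -> ids -> pieces, with out-of-vocab pieces mapping to
# <unk>. A minimal sketch using the same checkpoint the slow tests load:
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
pieces = tok.tokenize("I was born in 92000, and this is falsé.")
ids = tok.convert_tokens_to_ids(pieces)
print(pieces)
print(tok.convert_ids_to_tokens(ids))  # pieces outside the vocab come back as <unk>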
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class _A ( __UpperCAmelCase ,__UpperCAmelCase ): @register_to_config def __init__( self : int , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None): '''simple docstring''' super().__init__() __a = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __a = torch.zeros(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else: __a = None __a = torch.nn.Parameter(__SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : VQModel UpperCamelCase__ : CLIPTextModel UpperCamelCase__ : CLIPTokenizer UpperCamelCase__ : TransformeraDModel UpperCamelCase__ : LearnedClassifierFreeSamplingEmbeddings UpperCamelCase__ : VQDiffusionScheduler def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : TransformeraDModel , __SCREAMING_SNAKE_CASE : VQDiffusionScheduler , __SCREAMING_SNAKE_CASE : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = len(__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else 1 # get prompt text embeddings __a = self.tokenizer( __SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __a = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' F' {self.tokenizer.model_max_length} tokens: {removed_text}') __a = text_input_ids[:, : self.tokenizer.model_max_length] __a = self.text_encoder(text_input_ids.to(self.device))[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __a = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__SCREAMING_SNAKE_CASE) # duplicate text embeddings for each generation per prompt __a = prompt_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __a = self.learned_classifier_free_sampling_embeddings.embeddings __a = negative_prompt_embeds.unsqueeze(0).repeat(__SCREAMING_SNAKE_CASE , 1 , 1) else: __a = [''''''] * batch_size __a = text_input_ids.shape[-1] __a = self.tokenizer( __SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) __a = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # See comment for normalizing text embeddings __a = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__SCREAMING_SNAKE_CASE) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __a = negative_prompt_embeds.shape[1] __a = negative_prompt_embeds.repeat(1 , __SCREAMING_SNAKE_CASE , 1) __a = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __SCREAMING_SNAKE_CASE , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : float = 1.0 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = len(__SCREAMING_SNAKE_CASE) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__SCREAMING_SNAKE_CASE)}') __a = batch_size * num_images_per_prompt __a = guidance_scale > 1.0 __a = self._encode_prompt(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(__SCREAMING_SNAKE_CASE)}.') # get the initial completely masked latents unless the user supplied it __a = (batch_size, self.transformer.num_latent_pixels) if latents is None: __a = self.transformer.num_vector_embeds - 1 __a = torch.full(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).to(self.device) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). 
All latents be valid embedding indices i.e. in the range 0,''' F' {self.transformer.num_vector_embeds - 1} (inclusive).') __a = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps.to(self.device) __a = latents for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE)): # expand the sample if we are doing classifier free guidance __a = torch.cat([sample] * 2) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __a = self.transformer(__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE).sample if do_classifier_free_guidance: __a , __a = model_output.chunk(2) __a = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__SCREAMING_SNAKE_CASE , dim=1 , keepdim=__SCREAMING_SNAKE_CASE) __a = self.truncate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # remove `log(0)`'s (`-inf`s) __a = model_output.clamp(-70) # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.vqvae.config.vq_embed_dim __a = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __a = self.vqvae.quantize.get_codebook_entry(__SCREAMING_SNAKE_CASE , shape=__SCREAMING_SNAKE_CASE) __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE).sample __a = (image / 2 + 0.5).clamp(0 , 1) __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : float): '''simple docstring''' __a , __a = torch.sort(__SCREAMING_SNAKE_CASE , 1 , descending=__SCREAMING_SNAKE_CASE) __a = torch.exp(__SCREAMING_SNAKE_CASE) __a = sorted_p_x_0.cumsum(dim=1) < truncation_rate # Ensure that at least the largest probability is not zeroed out __a = torch.full_like(keep_mask[:, 0:1, :] , __SCREAMING_SNAKE_CASE) __a = torch.cat((all_true, keep_mask) , dim=1) __a = keep_mask[:, :-1, :] __a = keep_mask.gather(1 , indices.argsort(1)) __a = log_p_x_0.clone() __a = -torch.inf # -inf = log(0) return rv
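# End-to-end use of the pipeline class defined above, through the released
# checkpoint. Sketch only: the checkpoint id and the sampling values are
# assumptions, not taken from this file (truncation_rate maps to the truncate()
# step above; lower values keep only the most likely codebook entries).
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
image = pipe(
    "teddy bear playing in the pool",
    num_inference_steps=100,
    guidance_scale=5.0,
    truncation_rate=0.86,
).images[0]
image.save("vq_diffusion_sample.png")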
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
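# Typical use of the config above: tweak a download knob once and hand the
# object to a loader. Sketch; the dataset id is an illustrative assumption:
from datasets import DownloadConfig, load_dataset

dl_config = DownloadConfig(max_retries=5, num_proc=4)
independent = dl_config.copy()  # deep copy, mutating it leaves dl_config alone
# ds = load_dataset("imdb", download_config=dl_config)  # assumed dataset id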
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
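# Wiring the config into a model: any hyperparameter above can be overridden at
# construction time. Sketch (num_aggregation_labels=4 mirrors the WTQ-style
# setup; treat the values as illustrative, and note that TAPAS models have
# historically also required the torch-scatter extra):
from transformers import TapasConfig, TapasForQuestionAnswering

config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
model = TapasForQuestionAnswering(config)  # randomly initialised, config-driven
print(model.config.num_aggregation_labels)  # -> 4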
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
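# Running the script above: both flags are optional, --dpm swaps in the
# multistep DPM solver and --steps caps the denoising loop. The command and the
# hardware note below are assumptions, not part of the source:
#
#   python stable_diffusion_ipex.py --dpm --steps 20
#
# ipex.optimize() with dtype=torch.bfloat16 only pays off on CPUs with native
# bf16 support (e.g. AVX512-BF16 or AMX machines); elsewhere it is emulated.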
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]: """simple docstring""" lowercase__ : Dict = parent lowercase__ : Any = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : int = num_channels lowercase__ : Any = embed_dim lowercase__ : int = depths lowercase__ : Dict = num_heads lowercase__ : List[Any] = window_size lowercase__ : int = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = drop_path_rate lowercase__ : int = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : Tuple = patch_norm lowercase__ : Tuple = layer_norm_eps lowercase__ : Optional[Any] = initializer_range lowercase__ : int = is_training lowercase__ : Optional[int] = scope lowercase__ : str = use_labels lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = encoder_stride def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps 
,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Optional[int] = 1 lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.type_sequence_label_size lowercase__ : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCAmelCase : Optional[int] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : Dict = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[Any] = SwinvaModelTester(self ) lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = True for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Dict = outputs.attentions lowercase__ : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) ,_snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any] = True lowercase__ : Optional[Any] = config.window_size**2 lowercase__ : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowercase__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : Tuple = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) if hasattr(self.model_tester ,'''num_hidden_states_types''' ): lowercase__ : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowercase__ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) ) lowercase__ : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCAmelCase ( 
self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # Swinv2 has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape lowercase__ : int = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Any ) -> List[str]: """simple docstring""" lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
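# The integration test above, reduced to its everyday inference form. The
# checkpoint and the image path come from the test itself; note that in the
# released transformers package the classes are spelled with the digit, e.g.
# Swinv2ForImageClassification:
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

ckpt = "microsoft/swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = Swinv2ForImageClassification.from_pretrained(ckpt)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])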
import os import sys import unittest __lowerCamelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCamelCase : List[Any] = os.path.join(git_repo_path, """src""", """transformers""") __lowerCamelCase : Tuple = """ {0} = None """ __lowerCamelCase : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ __lowerCamelCase : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(A_ ) UpperCamelCase : List[Any] = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(A_ , "tokenizers" ) UpperCamelCase : Union[str, Any] = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(A_ , "tensorflow_text" ) UpperCamelCase : int = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(A_ , "sentencepiece_and_tokenizers" ) UpperCamelCase : Tuple = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(A_ , "sentencepiece_and_tensorflow_text" ) UpperCamelCase : List[Any] = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(A_ , "sentencepiece_and_tokenizers_and_vision" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , A_ ) self.assertIn("tensorflow_text" , A_ ) self.assertIn("sentencepiece_and_tokenizers" , A_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(A_ , "\nCONSTANT = None\n" ) UpperCamelCase : Optional[Any] = create_dummy_object("function" , "'torch'" ) self.assertEqual( A_ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) UpperCamelCase : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" UpperCamelCase : Dict = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, 
*args, **kwargs):\n requires_backends(self, [\"torch\"])\n" UpperCamelCase : str = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , A_ )
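For reference, this is what one generated dummy looks like once the class template above is filled in; the shape is taken verbatim from the test's expected strings, and the behavior note is an inference from how requires_backends is used:

from transformers.utils import DummyObject, requires_backends

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

# Instantiating FakeClass without torch installed raises an ImportError naming the missing backend.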
52
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
16
0
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : """simple docstring""" def __init__( self : Union[str, Any] , __A : str , __A : Tuple=1_3 , __A : Optional[int]=[3_0, 3_0] , __A : str=2 , __A : List[Any]=3 , __A : Dict=True , __A : Union[str, Any]=True , __A : Tuple=3_2 , __A : str=5 , __A : Dict=4 , __A : Optional[int]=3_7 , __A : Tuple="gelu" , __A : Tuple=0.1 , __A : List[str]=0.1 , __A : List[str]=1_0 , __A : Optional[int]=0.02 , __A : str=3 , __A : Dict=None , __A : List[str]=8 , __A : Any=1_0 , ): __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = scope __UpperCamelCase = n_targets __UpperCamelCase = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __UpperCamelCase = (image_size[1] // patch_size) * (image_size[0] // patch_size) __UpperCamelCase = num_patches + 1 + self.num_detection_tokens def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) __UpperCamelCase = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __UpperCamelCase = [] for i in range(self.batch_size ): __UpperCamelCase = {} __UpperCamelCase = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__A ) __UpperCamelCase = torch.rand(self.n_targets , 4 , device=__A ) labels.append(__A ) __UpperCamelCase = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : Tuple ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _lowerCamelCase ( self : Optional[Any] , __A : str , __A : Dict , __A : Dict ): __UpperCamelCase = YolosModel(config=__A 
) model.to(__A ) model.eval() __UpperCamelCase = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def _lowerCamelCase ( self : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Optional[Any] ): __UpperCamelCase = YolosForObjectDetection(__A ) model.to(__A ) model.eval() __UpperCamelCase = model(pixel_values=__A ) __UpperCamelCase = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) __UpperCamelCase = model(pixel_values=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict =(YolosModel, YolosForObjectDetection) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : str =( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Dict =False SCREAMING_SNAKE_CASE_ : Tuple =False SCREAMING_SNAKE_CASE_ : Optional[int] =False SCREAMING_SNAKE_CASE_ : Tuple =False def _lowerCamelCase ( self : Optional[Any] , __A : Optional[int] , __A : Dict , __A : str=False ): __UpperCamelCase = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __UpperCamelCase = [] for i in range(self.model_tester.batch_size ): __UpperCamelCase = {} __UpperCamelCase = torch.ones( size=(self.model_tester.n_targets,) , device=__A , dtype=torch.long ) __UpperCamelCase = torch.ones( self.model_tester.n_targets , 4 , device=__A , dtype=torch.float ) labels.append(__A ) __UpperCamelCase = labels return inputs_dict def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = YolosModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 ) def _lowerCamelCase ( self : List[str] ): self.config_tester.run_common_tests() def _lowerCamelCase ( self : Union[str, Any] ): # YOLOS does not use inputs_embeds pass def _lowerCamelCase ( self : Any ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def _lowerCamelCase ( self : Any ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__A ) __UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __A ) def 
_lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = True # in YOLOS, the seq_len is different __UpperCamelCase = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): __UpperCamelCase = model(**self._prepare_for_class(__A , __A ) ) __UpperCamelCase = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCamelCase = True __UpperCamelCase = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): __UpperCamelCase = model(**self._prepare_for_class(__A , __A ) ) __UpperCamelCase = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __UpperCamelCase = len(__A ) # Check attention is always last and order is fine __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): __UpperCamelCase = model(**self._prepare_for_class(__A , __A ) ) __UpperCamelCase = 1 self.assertEqual(out_len + added_hidden_states , len(__A ) ) __UpperCamelCase = outputs.attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowerCamelCase ( self : str ): def check_hidden_states_output(__A : List[str] , __A : int , __A : Tuple ): __UpperCamelCase = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): __UpperCamelCase = model(**self._prepare_for_class(__A , __A ) ) __UpperCamelCase = outputs.hidden_states __UpperCamelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__A ) , __A ) # YOLOS has a different seq_length __UpperCamelCase = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCamelCase = True check_hidden_states_output(__A , __A , __A ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__A ) @slow def _lowerCamelCase ( self : List[str] ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = YolosModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCamelCase ( self : 
Any ): return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(__A ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__A , return_tensors='pt' ).to(__A ) # forward pass with torch.no_grad(): __UpperCamelCase = model(inputs.pixel_values ) # verify outputs __UpperCamelCase = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , __A ) __UpperCamelCase = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__A , ) __UpperCamelCase = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __A , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __A , atol=1e-4 ) ) # verify postprocessing __UpperCamelCase = image_processor.post_process_object_detection( __A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] __UpperCamelCase = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__A ) __UpperCamelCase = [7_5, 7_5, 1_7, 6_3, 1_7] __UpperCamelCase = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__A ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , __A , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , __A ) self.assertTrue(torch.allclose(results['boxes'][0, :] , __A ) )
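A quick worked example of the expected sequence length used throughout the tests above, plugging in the tester defaults (30x30 images, patch size 2, 10 detection tokens):

num_patches = (30 // 2) * (30 // 2)      # 15 * 15 = 225
expected_seq_len = num_patches + 1 + 10  # 225 patches + [CLS] + detection tokens = 236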
53
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase_ = { 'facebook/xglm-564M': 2_048, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None: """simple docstring""" lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Optional[int] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_snake_case ) lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Optional[int]: """simple docstring""" lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Optional[int] = None lowercase__ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any: """simple docstring""" lowercase__ : int = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : Dict = {} lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" 
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip() return out_string def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
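To make the fairseq/spm alignment comment above concrete: the first "real" token "," has id 3 in the spm vocab but id 4 in the fairseq vocab, which is exactly what the constant offset of 1 compensates for during token-to-id conversion:

fairseq_offset = 1
spm_id = 3                    # id of "," in the spm vocab (from the table above)
fairseq_id = spm_id + fairseq_offset
assert fairseq_id == 4        # id of "," in the fairseq vocab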
16
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = emb.weight.shape __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_="facebook/mbart-large-en-ro" , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.load(lowerCAmelCase_ , map_location="cpu" )["model"] remove_ignore_keys_(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = state_dict["encoder.embed_tokens.weight"].shape[0] __SCREAMING_SNAKE_CASE = MBartConfig.from_pretrained(lowerCAmelCase_ , vocab_size=lowerCAmelCase_ ) if mbart_aa and finetuned: __SCREAMING_SNAKE_CASE = "relu" __SCREAMING_SNAKE_CASE = state_dict["decoder.embed_tokens.weight"] __SCREAMING_SNAKE_CASE = MBartForConditionalGeneration(lowerCAmelCase_ ) model.model.load_state_dict(lowerCAmelCase_ ) if finetuned: __SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') a__ : List[str] = parser.parse_args() a__ : int = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
54
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]: print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase ) else: lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase ) if hidden_sizes == 1_92: lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase ) if hidden_sizes == 2_56: lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase ) if hidden_sizes == 3_84: lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase ) from_model.eval() lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval() lowercase__ : str = OrderedDict() lowercase__ : int = from_model.state_dict() lowercase__ : Dict = list(from_model.state_dict().keys() ) lowercase__ : Any = list(our_model.state_dict().keys() ) print(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for i in range(len(__lowerCamelCase ) ): lowercase__ : str = weights[og_keys[i]] our_model.load_state_dict(__lowerCamelCase ) lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) ) lowercase__ : Optional[int] = from_model(__lowerCamelCase ) lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." 
lowercase__ : Any = name print(__lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase__ : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]: lowercase__ : Any = '''imagenet-1k-id2label.json''' lowercase__ : Tuple = 10_00 lowercase__ : Dict = (1, num_labels) lowercase__ : List[str] = '''huggingface/label-files''' lowercase__ : str = num_labels lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Union[str, Any] = idalabel lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) lowercase__ : Tuple = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } lowercase__ : Any = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
16
0
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) a_ : List[str] = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def __snake_case ( UpperCAmelCase_ : Any ): lowerCamelCase_ = {} state_dict.pop("pixel_mean" , UpperCAmelCase_ ) state_dict.pop("pixel_std" , UpperCAmelCase_ ) lowerCamelCase_ = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase_ = key.replace(UpperCAmelCase_ , UpperCAmelCase_ ) if re.match(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = int(re.match(UpperCAmelCase_ , UpperCAmelCase_ ).group(2 ) ) if layer_nb == 0: lowerCamelCase_ = key.replace("layers.0" , "proj_in" ) elif layer_nb == 1: lowerCamelCase_ = key.replace("layers.1" , "layers.0" ) elif layer_nb == 2: lowerCamelCase_ = key.replace("layers.2" , "proj_out" ) lowerCamelCase_ = value lowerCamelCase_ = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict="ybelkada/segment-anything" ): lowerCamelCase_ = hf_hub_download(UpperCAmelCase_ , F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: lowerCamelCase_ = SamConfig() elif "sam_vit_l" in model_name: lowerCamelCase_ = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCamelCase_ = SamConfig( vision_config=UpperCAmelCase_ , ) elif "sam_vit_h" in model_name: lowerCamelCase_ = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCamelCase_ = SamConfig( vision_config=UpperCAmelCase_ , ) lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location="cpu" ) lowerCamelCase_ = replace_keys(UpperCAmelCase_ ) lowerCamelCase_ = SamImageProcessor() lowerCamelCase_ = SamProcessor(image_processor=UpperCAmelCase_ ) lowerCamelCase_ = SamModel(UpperCAmelCase_ ) hf_model.load_state_dict(UpperCAmelCase_ ) lowerCamelCase_ = hf_model.to("cuda" ) lowerCamelCase_ = 
"https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" lowerCamelCase_ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert("RGB" ) lowerCamelCase_ = [[[400, 650]]] lowerCamelCase_ = [[1]] lowerCamelCase_ = processor(images=np.array(UpperCAmelCase_ ) , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase_ = hf_model(**UpperCAmelCase_ ) lowerCamelCase_ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 lowerCamelCase_ = processor( images=np.array(UpperCAmelCase_ ) , input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase_ = hf_model(**UpperCAmelCase_ ) lowerCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 lowerCamelCase_ = ((75, 275, 1725, 850),) lowerCamelCase_ = processor(images=np.array(UpperCAmelCase_ ) , input_boxes=UpperCAmelCase_ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase_ = hf_model(**UpperCAmelCase_ ) lowerCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. lowerCamelCase_ = [[[400, 650], [800, 650]]] lowerCamelCase_ = [[1, 1]] lowerCamelCase_ = processor( images=np.array(UpperCAmelCase_ ) , input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase_ = hf_model(**UpperCAmelCase_ ) lowerCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": a_ : int = argparse.ArgumentParser() a_ : List[str] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) a_ : Optional[Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
55
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __A : '''simple docstring''' lowerCAmelCase : List[str] lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ ) def __call__( self : List[str] ) -> Any: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __A : '''simple docstring''' lowerCAmelCase : Optional[List] = None lowerCAmelCase : Optional[int] = None lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None lowercase__ : Dict = len(self.languages ) if self.languages else None def __call__( self : List[Any] ) -> List[Any]: """simple docstring""" return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int: """simple docstring""" lowercase__ : List[Any] = set(self.languages ) if self.languages and set(_snake_case ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowercase__ : str = [] for lang, text in translation_dict.items(): if isinstance(_snake_case ,_snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) ) return {"language": languages, "translation": translations} def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
16
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
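With the lazy structure above, the torch-dependent submodules are only imported when an attribute is first resolved; a minimal sketch of what that means for a user:

from transformers.models import poolformer

config = poolformer.PoolFormerConfig()  # resolves configuration_poolformer on demand
# poolformer.PoolFormerModel would be resolved (and torch imported) only when accessed.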
56
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]: lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase__ : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : str = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Optional[int] = 16 elif accelerator.mixed_precision != "no": lowercase__ : List[Any] = 8 else: lowercase__ : int = None return tokenizer.pad( __lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) lowercase__ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1": lowercase__ : List[Any] = 2 # Initialize accelerator lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : str = config['''lr'''] lowercase__ : str = int(config['''num_epochs'''] ) lowercase__ : Optional[int] = int(config['''seed'''] ) lowercase__ : Tuple = int(config['''batch_size'''] ) lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__lowerCamelCase ) def inner_training_loop(__lowerCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Tuple = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase ) lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase ): model.train() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ : Dict = model(**__lowerCamelCase ) lowercase__ : List[Any] = outputs.loss accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Tuple = model(**__lowerCamelCase ) lowercase__ : Any = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCamelCase , references=__lowerCamelCase , ) lowercase__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ) -> Dict: lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase__ : int = parser.parse_args() lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
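A minimal sketch of the decorator's contract as used above (the usage pattern, not the library internals): the wrapped function takes the batch size as its first argument, and on an out-of-memory error it is re-run with the batch size halved:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=16)
def training_loop(batch_size):
    ...  # build dataloaders with `batch_size`; an OOM here triggers a retry at batch_size // 2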
16
0
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=4 , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_choices def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_attention_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case ( self ): __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] =True __UpperCAmelCase : List[Any] =( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def snake_case ( self ): __lowerCAmelCase = FlaxRoFormerModelTester(self ) @slow def snake_case ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=__a ) __lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) 
@require_flax class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self ): __lowerCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __lowerCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) __lowerCAmelCase = model(__a )[0] __lowerCAmelCase = 5_00_00 __lowerCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , __a ) __lowerCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
57
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __UpperCAmelCase ( __lowerCamelCase ) -> Any: lowercase__ : Optional[int] = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : str = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple: lowercase__ : List[str] = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def __UpperCAmelCase ( ) -> Optional[int]: lowercase__ : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: lowercase__ : List[Any] = '''imagenet-1k-id2label.json''' lowercase__ : Optional[Any] = 10_00 lowercase__ : Optional[Any] = '''huggingface/label-files''' lowercase__ : Dict = num_labels lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : str = {v: k for k, v in idalabel.items()} lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": lowercase__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": lowercase__ : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : List[Any] = [2, 2, 20] lowercase__ : Any = [3, 12, 16] lowercase__ : Tuple = [1_92, 7_68, 10_24] lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase ) lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) lowercase__ : List[str] = image_size lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) ) lowercase__ : int = OrderedDict() lowercase__ : List[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase ) lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase ) for cnt in range(config.depth[idx] ): lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase ) lowercase__ : List[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = 
argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
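# The converter above boils down to one pattern: build (new_key, old_key) pairs, then copy
# tensors from the original checkpoint into a fresh state dict under the new names. A
# stripped-down, framework-free sketch of that pattern (the checkpoint contents below are
# made up for illustration):
from collections import OrderedDict


def remap_state_dict(original, rename_pairs):
    """Return a new state dict with entries renamed according to (new_key, old_key) pairs."""
    remapped = OrderedDict()
    for new_key, old_key in rename_pairs:
        remapped[new_key] = original[old_key]  # raises KeyError early if a mapping went stale
    return remapped


pairs = [("encoder.layer.0.weight", "blocks.0.weight")]  # illustrative mapping only
print(remap_state_dict({"blocks.0.weight": [1.0, 2.0]}, pairs))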
16
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
58
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: if not isinstance(__lowerCamelCase , __lowerCamelCase ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowercase__ : Tuple = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
16
0
import operator as op


__lowerCamelCase = "scaler.pt"
__lowerCamelCase = "pytorch_model"
__lowerCamelCase = "random_states"
__lowerCamelCase = "optimizer"
__lowerCamelCase = "scheduler"
__lowerCamelCase = "pytorch_model.bin"
__lowerCamelCase = "pytorch_model.bin.index.json"
__lowerCamelCase = "model.safetensors"
__lowerCamelCase = "model.safetensors.index.json"
__lowerCamelCase = "1.10.2"
__lowerCamelCase = "py38"
__lowerCamelCase = "4.17.0"
__lowerCamelCase = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__lowerCamelCase = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__lowerCamelCase = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__lowerCamelCase = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__lowerCamelCase = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__lowerCamelCase = "2.0.1"
__lowerCamelCase = ["pdsh", "standard", "openmpi", "mvapich"]
__lowerCamelCase = ["default", "reduce-overhead", "max-autotune"]
__lowerCamelCase = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
__lowerCamelCase = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

__lowerCamelCase = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__lowerCamelCase = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
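# The operator table above maps comparison strings onto functions from the operator module,
# which is how configuration code can gate features on library versions. A minimal sketch of
# that idea (compare_versions here is a hypothetical helper written for illustration, not the
# library's own API):
import operator as op

from packaging.version import parse

STR_TO_OP = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}


def compare_versions(version_a, operation, version_b):
    """Evaluate `version_a <operation> version_b` using proper version ordering."""
    return STR_TO_OP[operation](parse(version_a), parse(version_b))


print(compare_versions("2.0.1", ">=", "1.10.2"))  # True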
59
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str: """simple docstring""" lowercase__ : int = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Optional[Any] = embeddings_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : str = depths lowercase__ : Tuple = is_training lowercase__ : List[Any] = use_labels lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Optional[Any] = len(_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) lowercase__ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = TFResNetModel(config=_snake_case ) lowercase__ : List[str] = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.num_labels lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case ) lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : Dict = {'''pixel_values''': 
pixel_values} return config, inputs_dict @require_tf class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : List[Any] = False lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : List[str] = False def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Optional[Any] = TFResNetModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[int] = [*signature.parameters.keys()] lowercase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ): lowercase__ : str = model_class(_snake_case ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : List[Any] = layer_type lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Any: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : Any = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' ) # forward pass lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
16
0
"""simple docstring""" from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split snake_case__ : List[str] = datasets.load_iris() snake_case__ : Union[str, Any] = np.array(data['''data''']) snake_case__ : Optional[Any] = np.array(data['''target''']) snake_case__ : Optional[Any] = data['''target_names'''] snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = train_test_split(X, y) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict ): return np.linalg.norm(np.array(_snake_case ) - np.array(_snake_case ) ) def _snake_case ( _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Dict=5 ): lowerCAmelCase : Optional[Any] = zip(_snake_case , _snake_case ) # List of distances of all points from the point to be classified lowerCAmelCase : Union[str, Any] = [] for data_point in data: lowerCAmelCase : Any = euclidean_distance(data_point[0] , _snake_case ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. lowerCAmelCase : str = [i[1] for i in sorted(_snake_case )[:k]] # Most commonly occurring class among them # is the class into which the point is classified lowerCAmelCase : Optional[Any] = Counter(_snake_case ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
60
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: if "model" in orig_key: lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase__ : str = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase__ : Optional[Any] = '''yoso.''' + orig_key return orig_key def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: for key in orig_state_dict.copy().keys(): lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase__ : Tuple = val lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase ) lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase ) lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) lowerCAmelCase_ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
16
0
"""simple docstring""" from math import factorial def __a ( __lowerCamelCase = 20 ): UpperCAmelCase_ : List[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase_ : int = n // 2 return int(factorial(__lowerCamelCase ) / (factorial(__lowerCamelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: _a = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
61
"""simple docstring""" import os def __UpperCAmelCase ( ) -> int: with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowercase__ : List[Any] = str(file.readlines()[0] ) lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowercase__ : int = 0 lowercase__ : Optional[Any] = 0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowercase__ : List[str] = 0 return total_score if __name__ == "__main__": print(solution())
16
0
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class UpperCAmelCase__(A_, unittest.TestCase):
    UpperCAmelCase__ : Tuple = PriorTransformer
    UpperCAmelCase__ : List[str] = "hidden_states"

    @property
    def _a(self) -> int:
        __UpperCamelCase = 4
        __UpperCamelCase = 8
        __UpperCamelCase = 7

        __UpperCamelCase = floats_tensor((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = floats_tensor((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(A_)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _a(self, A_=0) -> Dict:
        torch.manual_seed(A_)
        __UpperCamelCase = 4
        __UpperCamelCase = 8
        __UpperCamelCase = 7

        __UpperCamelCase = torch.randn((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = torch.randn((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = torch.randn((batch_size, num_embeddings, embedding_dim)).to(A_)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def _a(self) -> Tuple:
        return (4, 8)

    @property
    def _a(self) -> List[Any]:
        return (4, 8)

    def _a(self) -> str:
        __UpperCamelCase = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        __UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict

    def _a(self) -> Union[str, Any]:
        __UpperCamelCase, __UpperCamelCase = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=A_
        )
        self.assertIsNotNone(A_)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(A_)
        __UpperCamelCase = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def _a(self) -> Any:
        __UpperCamelCase, __UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
        __UpperCamelCase = self.model_class(**A_)
        __UpperCamelCase = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __UpperCamelCase = [*signature.parameters.keys()]

        __UpperCamelCase = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], A_)

    def _a(self) -> Union[str, Any]:
        __UpperCamelCase = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        __UpperCamelCase = model.to(A_)

        if hasattr(A_, "set_default_attn_processor"):
            model.set_default_attn_processor()

        __UpperCamelCase = self.get_dummy_seed_input()
        with torch.no_grad():
            __UpperCamelCase = model(**A_)[0]

        __UpperCamelCase = output[0, :5].flatten().cpu()
        print(A_)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        __UpperCamelCase = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(A_, A_, rtol=1e-2))


@slow
class UpperCAmelCase__(unittest.TestCase):
    def _a(self, A_=1, A_=768, A_=77, A_=0) -> Union[str, Any]:
        torch.manual_seed(A_)
        __UpperCamelCase = batch_size
        __UpperCamelCase = embedding_dim
        __UpperCamelCase = num_embeddings
        __UpperCamelCase = torch.randn((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = torch.randn((batch_size, embedding_dim)).to(A_)
        __UpperCamelCase = torch.randn((batch_size, num_embeddings, embedding_dim)).to(A_)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _a(self) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def _a(self, A_, A_) -> int:
        __UpperCamelCase = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(A_)
        __UpperCamelCase = self.get_dummy_seed_input(seed=A_)

        with torch.no_grad():
            __UpperCamelCase = model(**A_)[0]

        assert list(sample.shape) == [1, 768]

        __UpperCamelCase = sample[0, :8].flatten().cpu()
        print(A_)
        __UpperCamelCase = torch.tensor(A_)

        assert torch_all_close(A_, A_, atol=1e-3)
62
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(A_ ) class __A ( A_ ): '''simple docstring''' def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]: """simple docstring""" super().__init__(**_snake_case ) requires_backends(self ,'''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]: """simple docstring""" return super().__call__(_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = {} if "candidate_labels" in kwargs: lowercase__ : Any = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase__ : Optional[Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]: """simple docstring""" lowercase__ : List[Any] = load_image(_snake_case ) lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework ) lowercase__ : str = candidate_labels lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels] lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case ) lowercase__ : Optional[int] = [text_inputs] return inputs def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' ) lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] ,_snake_case ): lowercase__ : List[str] = text_inputs[0] else: # Batching case. 
lowercase__ : int = text_inputs[0][0] lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case ) lowercase__ : Union[str, Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Dict = model_outputs.pop('''candidate_labels''' ) lowercase__ : Optional[Any] = model_outputs['''logits'''][0] if self.framework == "pt": lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) lowercase__ : Tuple = probs.tolist() if not isinstance(_snake_case ,_snake_case ): lowercase__ : Any = [scores] elif self.framework == "tf": lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 ) lowercase__ : Optional[Any] = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) lowercase__ : Union[str, Any] = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda _snake_case : -x[0] ) ] return result
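# A usage sketch for the pipeline class above via the high-level factory. The CLIP
# checkpoint and the sample image URL are common choices from the HF test suite, but
# any compatible model id and image should work:
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(result)  # list of {"score": ..., "label": ...} dicts, sorted by descending score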
16
0
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when requested."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
63
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): lowercase__ : List[str] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__lowerCamelCase ): # looping through rows of graph array for i in range(__lowerCamelCase ): # looping through columns of graph array for j in range(__lowerCamelCase ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowercase__ : str = dist[i][k] + dist[k][j] _print_dist(__lowerCamelCase , __lowerCamelCase ) return dist, v if __name__ == "__main__": lowerCAmelCase_ = int(input('Enter number of vertices: ')) lowerCAmelCase_ = int(input('Enter number of edges: ')) lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowerCAmelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowerCAmelCase_ = int(input('Enter source:')) lowerCAmelCase_ = int(input('Enter destination:')) lowerCAmelCase_ = float(input('Enter weight:')) lowerCAmelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
16
0
"""simple docstring""" import socket def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) _snake_case : int = socket.gethostname() _snake_case : Any = 1_23_12 sock.connect((host, port) ) sock.send(B"""Hello server!""" ) with open("""Received_file""" , """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: _snake_case : Dict = sock.recv(10_24 ) if not data: break out_file.write(snake_case__ ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
64
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
16
0
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


UpperCamelCase__ = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
    ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    ' depression" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

UpperCamelCase__ = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def lowerCAmelCase_() -> Optional[Any]:
    UpperCAmelCase__ = calculate_rouge(__A, __A, bootstrap_aggregation=__A, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(__A, __A)
    UpperCAmelCase__ = calculate_rouge(__A, __A, bootstrap_aggregation=__A, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def lowerCAmelCase_() -> Tuple:
    UpperCAmelCase__ = "rougeLsum"
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=[k])[k]
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=[k])[k]
    assert score > score_no_sep


def lowerCAmelCase_() -> str:
    UpperCAmelCase__ = ["rouge1", "rouge2", "rougeL"]
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=__A)
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=__A)
    assert score_sep == score_no_sep


def lowerCAmelCase_() -> int:
    UpperCAmelCase__ = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    UpperCAmelCase__ = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(__A, __A, newline_sep=__A) == calculate_rouge(__A, __A, newline_sep=__A)


def lowerCAmelCase_() -> Tuple:
    UpperCAmelCase__ = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it'
        ' is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    UpperCAmelCase__ = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
        ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas'
        " Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    UpperCAmelCase__ = calculate_rouge(__A, __A, rouge_keys=["rougeLsum"], newline_sep=__A)["rougeLsum"]
    UpperCAmelCase__ = calculate_rouge(__A, __A, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def lowerCAmelCase_() -> Optional[int]:
    UpperCAmelCase__ = Path("examples/seq2seq/test_data/wmt_en_ro")
    UpperCAmelCase__ = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(__A, __A)
    UpperCAmelCase__ = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=__A
    )
    assert isinstance(__A, __A)
65
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
16
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: torch.manual_seed(0 ) snake_case_ :str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCAmelCase_ ( self: List[str] ) -> str: snake_case_ :Union[str, Any] = self.dummy_uncond_unet snake_case_ :Tuple = PNDMScheduler() snake_case_ :str = PNDMPipeline(unet=snake_case , scheduler=snake_case ) pndm.to(snake_case ) pndm.set_progress_bar_config(disable=snake_case ) snake_case_ :int = torch.manual_seed(0 ) snake_case_ :List[Any] = pndm(generator=snake_case , num_inference_steps=20 , output_type="""numpy""" ).images snake_case_ :Optional[Any] = torch.manual_seed(0 ) snake_case_ :Union[str, Any] = pndm(generator=snake_case , num_inference_steps=20 , output_type="""numpy""" , return_dict=snake_case )[0] snake_case_ :Optional[int] = image[0, -3:, -3:, -1] snake_case_ :str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ :List[str] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case_ :int = """google/ddpm-cifar10-32""" snake_case_ :List[str] = UNetaDModel.from_pretrained(snake_case ) snake_case_ :Any = PNDMScheduler() snake_case_ :Tuple = PNDMPipeline(unet=snake_case , scheduler=snake_case ) pndm.to(snake_case ) pndm.set_progress_bar_config(disable=snake_case ) snake_case_ :str = torch.manual_seed(0 ) snake_case_ :Any = pndm(generator=snake_case , output_type="""numpy""" ).images snake_case_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ :Optional[int] = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
66
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = 
AutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : List[Any] = 
AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
16
0
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort a copy of input_list with iterative (bottom-up) merge sort and return it."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
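# A quick sanity check: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns
# [1, 2, 5, 7, 7, 8, 9]; each outer pass doubles the merge width p, so the
# sort performs O(n log n) comparisons overall.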
67
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int: lowercase__ : int = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
16
0
def max_product(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
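# Illustrative sanity checks: the best contiguous product of [2, 3, -2, 4] is
# 2 * 3 = 6, and [-2, 0, -1] yields 0 because the zero resets both running
# products.
if __name__ == "__main__":
    assert max_product([2, 3, -2, 4]) == 6
    assert max_product([-2, 0, -1]) == 0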
68
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" debug_launcher(test_ops.main )
16
0
"""simple docstring""" import os def UpperCAmelCase ( ) -> Union[str, Any]: with open(os.path.dirname(UpperCAmelCase ) + '/p022_names.txt' ) as file: snake_case_ = str(file.readlines()[0] ) snake_case_ = names.replace('"' , '' ).split(',' ) names.sort() snake_case_ = 0 snake_case_ = 0 for i, name in enumerate(UpperCAmelCase ): for letter in name: name_score += ord(UpperCAmelCase ) - 64 total_score += (i + 1) * name_score snake_case_ = 0 return total_score if __name__ == "__main__": print(solution())
69
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
0
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
70
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
16
0
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging A_ :List[Any] = logging.get_logger(__name__) def A ( a_ ,a_ ) -> List[Any]: __UpperCamelCase : int =nn.functional.normalize(a_ ) __UpperCamelCase : List[Any] =nn.functional.normalize(a_ ) return torch.mm(a_ ,normalized_text_embeds.t() ) class __A ( a ): """simple docstring""" UpperCamelCase__ : Dict =CLIPConfig UpperCamelCase__ : Union[str, Any] =["""CLIPEncoderLayer"""] def __init__( self , lowerCamelCase__ ): """simple docstring""" super().__init__(lowerCamelCase__ ) __UpperCamelCase : List[Any] =CLIPVisionModel(config.vision_config ) __UpperCamelCase : str =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCamelCase__ ) __UpperCamelCase : Dict =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCamelCase__ ) __UpperCamelCase : Dict =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCamelCase__ ) __UpperCamelCase : Dict =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCamelCase__ ) @torch.no_grad() def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =self.vision_model(lowerCamelCase__ )[1] # pooled_output __UpperCamelCase : Any =self.visual_projection(lowerCamelCase__ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCamelCase : Optional[int] =cosine_distance(lowerCamelCase__ , self.special_care_embeds ).cpu().float().numpy() __UpperCamelCase : Union[str, Any] =cosine_distance(lowerCamelCase__ , self.concept_embeds ).cpu().float().numpy() __UpperCamelCase : Optional[Any] =[] __UpperCamelCase : Tuple =image_embeds.shape[0] for i in range(lowerCamelCase__ ): __UpperCamelCase : Dict ={'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __UpperCamelCase : int =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __UpperCamelCase : Tuple =special_cos_dist[i][concept_idx] __UpperCamelCase : Optional[Any] =self.special_care_embeds_weights[concept_idx].item() __UpperCamelCase : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) __UpperCamelCase : Optional[int] =0.01 for concept_idx in range(len(cos_dist[0] ) ): __UpperCamelCase : Optional[int] =cos_dist[i][concept_idx] __UpperCamelCase : str =self.concept_embeds_weights[concept_idx].item() __UpperCamelCase : Any =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCamelCase__ ) result.append(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =[len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Any =self.vision_model(lowerCamelCase__ )[1] # pooled_output __UpperCamelCase : List[str] =self.visual_projection(lowerCamelCase__ ) __UpperCamelCase : List[Any] =cosine_distance(lowerCamelCase__ , self.special_care_embeds ) __UpperCamelCase : List[Any] 
=cosine_distance(lowerCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __UpperCamelCase : List[Any] =0.0 __UpperCamelCase : Union[str, Any] =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __UpperCamelCase : Dict =torch.any(special_scores > 0 , dim=1 ) __UpperCamelCase : Optional[int] =special_care * 0.01 __UpperCamelCase : Dict =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) __UpperCamelCase : Optional[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __UpperCamelCase : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
71
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
16
0
"""simple docstring""" from pathlib import Path import fire def snake_case_ ( A_ : str, A_ : str, A_ : int ): '''simple docstring''' _lowerCamelCase : Optional[int] = Path(A_ ) _lowerCamelCase : Optional[int] = Path(A_ ) dest_dir.mkdir(exist_ok=A_ ) for path in src_dir.iterdir(): _lowerCamelCase : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] _lowerCamelCase : int = dest_dir.joinpath(path.name ) print(A_ ) dest_path.open('''w''' ).write('''\n'''.join(A_ ) ) if __name__ == "__main__": fire.Fire(minify)
72
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
16
0
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of the almost-equilateral triangles with integral sides
    and area whose perimeter does not exceed `max_perimeter`.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
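# Sanity check: the smallest qualifying triangles are 5-5-6 (perimeter 16) and
# 17-17-16 (perimeter 50), so solution(16) == 16 and solution(50) == 66.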
73
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]: """simple docstring""" lowercase__ : Dict = parent lowercase__ : Any = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : int = num_channels lowercase__ : Any = embed_dim lowercase__ : int = depths lowercase__ : Dict = num_heads lowercase__ : List[Any] = window_size lowercase__ : int = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = drop_path_rate lowercase__ : int = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : Tuple = patch_norm lowercase__ : Tuple = layer_norm_eps lowercase__ : Optional[Any] = initializer_range lowercase__ : int = is_training lowercase__ : Optional[int] = scope lowercase__ : str = use_labels lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = encoder_stride def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps 
,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Optional[int] = 1 lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.type_sequence_label_size lowercase__ : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCAmelCase : Optional[int] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : Dict = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[Any] = SwinvaModelTester(self ) lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = True for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Dict = outputs.attentions lowercase__ : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) ,_snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any] = True lowercase__ : Optional[Any] = config.window_size**2 lowercase__ : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowercase__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : Tuple = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) if hasattr(self.model_tester ,'''num_hidden_states_types''' ): lowercase__ : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowercase__ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) ) lowercase__ : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCAmelCase ( 
self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # Swinv2 has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape lowercase__ : int = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Any ) -> List[str]: """simple docstring""" lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
16
0
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) set_seed(7_70) _lowercase = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } _lowercase = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } _lowercase = os.path.dirname(os.path.abspath(__file__)) _lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''') _lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def _snake_case ( snake_case__ : Tuple , snake_case__ : List[str]=False ): A = model_type if use_small: key += "_small" return os.path.join(snake_case__ , REMOTE_MODEL_PATHS[key]['file_name'] ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) hf_hub_download(repo_id=snake_case__ , filename=snake_case__ , local_dir=snake_case__ ) def _snake_case ( snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Any=False , snake_case__ : List[str]="text" ): if model_type == "text": A = BarkSemanticModel A = BarkSemanticConfig A = BarkSemanticGenerationConfig elif model_type == "coarse": A = BarkCoarseModel A = BarkCoarseConfig A = BarkCoarseGenerationConfig elif model_type == "fine": A = BarkFineModel A = BarkFineConfig A = BarkFineGenerationConfig else: raise NotImplementedError() A = F'{model_type}_small' if use_small else model_type A = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(snake_case__ ): logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['repo_id'] , model_info['file_name'] ) A = torch.load(snake_case__ , map_location=snake_case__ ) # this is a hack A = checkpoint['model_args'] if "input_vocab_size" not in model_args: A = model_args['vocab_size'] A = model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A = model_args.pop('n_head' ) A = model_args.pop('n_embd' ) A = model_args.pop('n_layer' ) A = ConfigClass(**checkpoint['model_args'] ) A = ModelClass(config=snake_case__ ) A = GenerationConfigClass() A = model_generation_config A = checkpoint['model'] # fixup checkpoint A = '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(snake_case__ ): # replace part of the key with corresponding layer name in HF implementation A = k[len(snake_case__ ) :] for old_layer_name in new_layer_name_dict: A = new_k.replace(snake_case__ , new_layer_name_dict[old_layer_name] ) A = state_dict.pop(snake_case__ ) A = set(state_dict.keys() ) - set(model.state_dict().keys() ) A = {k for k in extra_keys if not k.endswith('.attn.bias' )} A = set(model.state_dict().keys() ) - set(state_dict.keys() ) A = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(snake_case__ ) != 0: raise ValueError(F'extra keys found: {extra_keys}' ) if len(snake_case__ ) != 0: raise ValueError(F'missing keys: {missing_keys}' ) model.load_state_dict(snake_case__ , strict=snake_case__ ) A = model.num_parameters(exclude_embeddings=snake_case__ ) A = checkpoint['best_val_loss'].item() logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(snake_case__ , 3 )} loss' ) model.eval() model.to(snake_case__ ) del checkpoint, state_dict return model def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any]=False , snake_case__ : List[Any]="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A = 'cpu' # do conversion on cpu A = _get_ckpt_path(snake_case__ , use_small=snake_case__ ) A = _load_model(snake_case__ , snake_case__ , model_type=snake_case__ , use_small=snake_case__ ) # load bark initial model A = _bark_load_model(snake_case__ , 'cpu' , model_type=snake_case__ , use_small=snake_case__ ) if model_type == "text": A = bark_model['model'] if model.num_parameters(exclude_embeddings=snake_case__ ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model A = 5 A = 10 if model_type in ["text", "coarse"]: A = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) A = bark_model(snake_case__ )[0] A = model(snake_case__ ) # take last logits A = output_new_model_total.logits[:, [-1], :] else: A = 3 A = 8 A = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) A = model(snake_case__ , snake_case__ ) A = bark_model(snake_case__ , snake_case__ ) A = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new outputs are not equal' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : Union[str, Any] , ): A 
= os.path.join(snake_case__ , snake_case__ ) A = BarkSemanticConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = BarkCoarseConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = BarkFineConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) A = BarkSemanticModel.from_pretrained(snake_case__ ) A = BarkCoarseModel.from_pretrained(snake_case__ ) A = BarkFineModel.from_pretrained(snake_case__ ) A = EncodecModel.from_pretrained('facebook/encodec_24khz' ) A = BarkConfig.from_sub_model_configs( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) A = BarkModel(snake_case__ ) A = semantic A = coarseAcoustic A = fineAcoustic A = codec A = bark_generation_config Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) bark.save_pretrained(snake_case__ , repo_id=snake_case__ , push_to_hub=snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') _lowercase = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
74
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
16
0
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __UpperCamelCase ( unittest.TestCase ): lowercase : Optional[int] =MODEL_FOR_MASKED_LM_MAPPING lowercase : Any =TF_MODEL_FOR_MASKED_LM_MAPPING def lowercase__ ( self ): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''tf''' ) lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser''', }, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2_941, '''token_str''': ''' Te'''}, ], ) @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''pt''' ) lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''}, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is 
Te''', '''score''': 2e-05, '''token''': 2_941, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''}, ], ) lowerCamelCase_ =unmasker('''My name is <mask> <mask>''', top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ [ { '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ], ) @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''fill-mask''', model='''hf-internal-testing/tiny-random-distilbert''', device=0, framework='''pt''' ) # convert model to fp16 pipe.model.half() lowerCamelCase_ =pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''pt''' ) self.run_large_test(lowerCAmelCase ) @slow @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''tf''' ) self.run_large_test(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ {'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1_573, '''token_str''': ''' Chris'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.2_5_1, '''token''': 2_201, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.2_1_4, '''token''': 12_790, '''token_str''': ''' Lyon''', }, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ {'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13_606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2_941, '''token_str''': ''' Te'''}, ], ) @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''pt''' ) lowerCamelCase_ =None lowerCamelCase_ =None self.run_pipeline_test(lowerCAmelCase, [] ) @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', 
framework='''tf''' ) lowerCamelCase_ =None lowerCamelCase_ =None self.run_pipeline_test(lowerCAmelCase, [] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =[ f'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =fill_masker.tokenizer lowerCamelCase_ =fill_masker.model lowerCamelCase_ =fill_masker( f'''This is a {tokenizer.mask_token}''', ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( lowerCAmelCase, [ [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), 
'''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ], ) with self.assertRaises(lowerCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowerCAmelCase ): fill_masker('''This is''' ) self.run_test_top_k(lowerCAmelCase, lowerCAmelCase ) self.run_test_targets(lowerCAmelCase, lowerCAmelCase ) self.run_test_top_k_targets(lowerCAmelCase, lowerCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase, lowerCAmelCase ) self.fill_mask_with_multiple_masks(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =tokenizer.get_vocab() lowerCamelCase_ =sorted(vocab.keys() )[:2] # Pipeline argument lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, targets=lowerCAmelCase ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ ={vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase ) lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) ) # Call argument lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ ={vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase ) lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) ) # Score equivalence lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) lowerCamelCase_ =[top_mask['''token_str'''] for top_mask in outputs] lowerCamelCase_ =[top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCAmelCase ) == set(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) lowerCamelCase_ =[top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) ) # Raises with invalid with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[''''''] ) with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets='''''' ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, top_k=2 ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2 ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =tokenizer.get_vocab() lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) # top_k=2, ntargets=3 lowerCamelCase_ =sorted(vocab.keys() )[:3] lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2, targets=lowerCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCamelCase_ =[el['''token_str'''] for el in sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x["score"], reverse=lowerCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCAmelCase ).issubset(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=3, targets=lowerCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =tokenizer.get_vocab() # String duplicates + id duplicates lowerCamelCase_ =sorted(vocab.keys() )[:3] lowerCamelCase_ =[targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCamelCase_ =fill_masker(f'''My name is {tokenizer.mask_token}''', targets=lowerCAmelCase, top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCAmelCase ), 3 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker( f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''', top_k=2 ) self.assertEqual( lowerCAmelCase, [ [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ], )
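# A minimal, hedged usage sketch of the fill-mask pipeline the tests above exercise;
# the model name and mask sentence mirror the slow test, and the printed fields
# mirror the asserted output keys. Exact scores vary and are illustrative only.
from transformers import pipeline

unmasker = pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2)
for prediction in unmasker('''The largest city in France is <mask>.'''):
    # every prediction dict carries: sequence, score, token (the id), token_str
    print(prediction['''token_str'''], round(prediction['''score'''], 3))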
75
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase_ = { 'facebook/xglm-564M': 2_048, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None: """simple docstring""" lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Optional[int] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_snake_case ) lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Optional[int]: """simple docstring""" lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Optional[int] = None lowercase__ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any: """simple docstring""" lowercase__ : int = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : Dict = {} lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" 
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip() return out_string def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
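# A hedged sketch of the fairseq/sentencepiece id alignment implemented above: the
# first four ids are pinned for <s>/<pad>/</s>/<unk> and every sentencepiece id is
# shifted by the fairseq offset of 1, with spm id 0 (<unk>) mapped to fairseq's 3.
FAIRSEQ_TOKENS_TO_IDS = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
FAIRSEQ_OFFSET = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # sentencepiece reserves id 0 for its own <unk>; a 0 here therefore means
    # "unknown token", which the tokenizer above maps to the fairseq unk id
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS['''<unk>''']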
16
0
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # swap two randomly chosen positions once per element, in place
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
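# A hedged reproducibility note for the shuffle above: seeding Python's global RNG
# makes repeated runs deterministic; the seed value here is an arbitrary assumption.
import random

random.seed(42)
print(fisher_yates_shuffle(list(range(8))))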
76
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]: print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase ) else: lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase ) if hidden_sizes == 1_92: lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase ) if hidden_sizes == 2_56: lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase ) if hidden_sizes == 3_84: lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase ) from_model.eval() lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval() lowercase__ : str = OrderedDict() lowercase__ : int = from_model.state_dict() lowercase__ : Dict = list(from_model.state_dict().keys() ) lowercase__ : Any = list(our_model.state_dict().keys() ) print(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for i in range(len(__lowerCamelCase ) ): lowercase__ : str = weights[og_keys[i]] our_model.load_state_dict(__lowerCamelCase ) lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) ) lowercase__ : Optional[int] = from_model(__lowerCamelCase ) lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." 
lowercase__ : Any = name print(__lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase__ : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]: lowercase__ : Any = '''imagenet-1k-id2label.json''' lowercase__ : Tuple = 10_00 lowercase__ : Dict = (1, num_labels) lowercase__ : List[str] = '''huggingface/label-files''' lowercase__ : str = num_labels lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Union[str, Any] = idalabel lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) lowercase__ : Tuple = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } lowercase__ : Any = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
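# A hedged invocation sketch for the conversion script above; the script file name
# is an assumption, while the model name, dump folder, and flag match the supported
# choices and the argparse defaults shown.
# python convert_levit_to_pytorch.py --model_name levit-128S \
#     --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub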
16
0
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _UpperCamelCase : List[str] = 8 def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=BITS ): '''simple docstring''' lowercase__ : List[Any] = x.device lowercase__ : List[str] = (x * 255).int().clamp(0 , 255 ) lowercase__ : Union[str, Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase ) lowercase__ : Dict = rearrange(_lowerCAmelCase , 'd -> d 1 1' ) lowercase__ : List[Any] = rearrange(_lowerCAmelCase , 'b c h w -> b c 1 h w' ) lowercase__ : Any = ((x & mask) != 0).float() lowercase__ : Optional[int] = rearrange(_lowerCAmelCase , 'b c d h w -> b (c d) h w' ) lowercase__ : int = bits * 2 - 1 return bits def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]=BITS ): '''simple docstring''' lowercase__ : Dict = x.device lowercase__ : str = (x > 0).int() lowercase__ : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase , dtype=torch.intaa ) lowercase__ : Union[str, Any] = rearrange(_lowerCAmelCase , 'd -> d 1 1' ) lowercase__ : Tuple = rearrange(_lowerCAmelCase , 'b (c d) h w -> b c d h w' , d=8 ) lowercase__ : Union[str, Any] = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def a_ ( self : List[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) lowercase__ : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas lowercase__ : Optional[Any] = self.alphas_cumprod[timestep] lowercase__ : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod lowercase__ : Optional[int] = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" lowercase__ : str = self.bit_scale if self.config.clip_sample: lowercase__ : Tuple = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 5. 
compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) lowercase__ : int = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) lowercase__ : List[str] = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide lowercase__ : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase__ : Tuple = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase__ : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 lowercase__ : Optional[Any] = model_output.device if torch.is_tensor(_lowerCAmelCase ) else 'cpu' lowercase__ : Tuple = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) lowercase__ : Tuple = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise lowercase__ : int = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def a_ ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int="epsilon" , _lowerCAmelCase : int=None , _lowerCAmelCase : bool = True , ): '''simple docstring''' lowercase__ : Dict = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: lowercase__ , lowercase__ : List[Any] = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 ) else: lowercase__ : Tuple = None # 1. compute alphas, betas lowercase__ : int = self.alphas_cumprod[t] lowercase__ : Union[str, Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one lowercase__ : Optional[int] = 1 - alpha_prod_t lowercase__ : Any = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": lowercase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": lowercase__ : Optional[Any] = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" lowercase__ : Optional[int] = self.bit_scale if self.config.clip_sample: lowercase__ : Any = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t lowercase__ : Tuple = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowercase__ : Optional[int] = 0 if t > 0: lowercase__ : Dict = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device ) lowercase__ : List[str] = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise lowercase__ : Union[str, Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) class UpperCAmelCase_ ( _a): def __init__( self , a , a , a = 1.0 , ) -> str: super().__init__() lowercase__ : Any = bit_scale lowercase__ : str = ( ddim_bit_scheduler_step if isinstance(a , a ) else ddpm_bit_scheduler_step ) self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self , a = 2_5_6 , a = 2_5_6 , a = 5_0 , a = None , a = 1 , a = "pil" , a = True , **a , ) -> Union[Tuple, ImagePipelineOutput]: lowercase__ : Optional[Any] = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=a , ) lowercase__ : Optional[int] = decimal_to_bits(a ) * self.bit_scale lowercase__ : Dict = latents.to(self.device ) self.scheduler.set_timesteps(a ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual lowercase__ : str = self.unet(a , a ).sample # compute the previous noisy sample x_t -> x_t-1 lowercase__ : Tuple = self.scheduler.step(a , a , a ).prev_sample lowercase__ : List[str] = bits_to_decimal(a ) if output_type == "pil": lowercase__ : Optional[int] = self.numpy_to_pil(a ) if not return_dict: return (image,) return ImagePipelineOutput(images=a )
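# A hedged round-trip sketch for the bit encode/decode helpers defined at the top
# of this file. The pipeline body calls them as decimal_to_bits / bits_to_decimal,
# so those names are assumed here: images in [0, 1] are quantized to 8 bits, mapped
# to {-1, +1} bit planes, and recovered up to 1/255 quantization.
import torch

x = torch.rand(2, 3, 8, 8)              # fake image batch in [0, 1]
bits = decimal_to_bits(x)               # shape (2, 3 * 8, 8, 8), values in {-1, +1}
recovered = bits_to_decimal(bits)       # back to [0, 1] in steps of 1/255
assert torch.allclose((x * 255).int().float() / 255, recovered)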
77
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __A : '''simple docstring''' lowerCAmelCase : List[str] lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ ) def __call__( self : List[str] ) -> Any: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __A : '''simple docstring''' lowerCAmelCase : Optional[List] = None lowerCAmelCase : Optional[int] = None lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None lowercase__ : Dict = len(self.languages ) if self.languages else None def __call__( self : List[Any] ) -> List[Any]: """simple docstring""" return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int: """simple docstring""" lowercase__ : List[Any] = set(self.languages ) if self.languages and set(_snake_case ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowercase__ : str = [] for lang, text in translation_dict.items(): if isinstance(_snake_case ,_snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) ) return {"language": languages, "translation": translations} def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
16
0
"""simple docstring""" def _lowerCAmelCase ( ): UpperCAmelCase = [] UpperCAmelCase = 1 while len(lowercase_ ) < 1e6: constant.append(str(lowercase_ ) ) i += 1 UpperCAmelCase = ''.join(lowercase_ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[99999] ) * int(constant[999999] ) ) if __name__ == "__main__": print(solution())
78
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]: lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase__ : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : str = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Optional[int] = 16 elif accelerator.mixed_precision != "no": lowercase__ : List[Any] = 8 else: lowercase__ : int = None return tokenizer.pad( __lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) lowercase__ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1": lowercase__ : List[Any] = 2 # Initialize accelerator lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : str = config['''lr'''] lowercase__ : str = int(config['''num_epochs'''] ) lowercase__ : Optional[int] = int(config['''seed'''] ) lowercase__ : Tuple = int(config['''batch_size'''] ) lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__lowerCamelCase ) def inner_training_loop(__lowerCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Tuple = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase ) lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase ): model.train() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ : Dict = model(**__lowerCamelCase ) lowercase__ : List[Any] = outputs.loss accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Tuple = model(**__lowerCamelCase ) lowercase__ : Any = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCamelCase , references=__lowerCamelCase , ) lowercase__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ) -> Dict: lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase__ : int = parser.parse_args() lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
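# A minimal sketch of the find_executable_batch_size pattern used above: the
# decorated function receives the batch size as its first argument and is retried
# with a smaller batch size whenever it raises an out-of-memory error; the body
# below is a placeholder assumption.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def sketch_training_loop(batch_size):
    print(f'''trying batch_size={batch_size}''')
    # ... build the dataloaders with `batch_size` and run the training loop ...

sketch_training_loop()  # called with no arguments, exactly as in the script above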
16
0
'''simple docstring'''
from PIL import Image


def mean_threshold(image: Image) -> Image:
    '''Binarize a grayscale image at its mean pixel intensity.'''
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    image.save('''output_image_path''')
79
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __UpperCAmelCase ( __lowerCamelCase ) -> Any: lowercase__ : Optional[int] = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : str = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple: lowercase__ : List[str] = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def __UpperCAmelCase ( ) -> Optional[int]: lowercase__ : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: lowercase__ : List[Any] = '''imagenet-1k-id2label.json''' lowercase__ : Optional[Any] = 10_00 lowercase__ : Optional[Any] = '''huggingface/label-files''' lowercase__ : Dict = num_labels lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : str = {v: k for k, v in idalabel.items()} lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": lowercase__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": lowercase__ : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : List[Any] = [2, 2, 20] lowercase__ : Any = [3, 12, 16] lowercase__ : Tuple = [1_92, 7_68, 10_24] lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase ) lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) lowercase__ : List[str] = image_size lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) ) lowercase__ : int = OrderedDict() lowercase__ : List[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase ) lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase ) for cnt in range(config.depth[idx] ): lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase ) lowercase__ : List[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = 
argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
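# A hedged invocation sketch for the CvT conversion script above; the script file
# name and output path are illustrative assumptions, and the flags match the
# argparse setup.
# python convert_cvt_to_pytorch.py --cvt_model cvt-w24 --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#     --pytorch_dump_folder_path ./cvt-w24-384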
16
0
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate a__ : Dict = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) a__ : Dict = [] a__ : Tuple = [] a__ : Union[str, Any] = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} a__ : List[Any] = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""", 'emoji': True, }, } ] a__ : Union[str, Any] = 0 for log in Path().glob('*.log'): a__ : Tuple = 0 with open(log, 'r') as f: for line in f: a__ : Optional[Any] = json.loads(line) if line.get('nodeid', '') != "": a__ : Union[str, Any] = line['nodeid'] if line.get('duration', None) is not None: a__ : Optional[Any] = F"""{line['duration']:.4f}""" if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) a__ : List[Any] = [] log.unlink() a__ : Optional[int] = '' a__ : Any = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" a__ : List[Any] = [] a__ : List[str] = {} for test in failed_tests: a__ : List[Any] = test[0].split('::') a__ : List[str] = data[0].split('/')[-1] if data[0] not in filesafailed: a__ : List[Any] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) a__ : Tuple = [test[0] for test in failed_table] a__ : List[Any] = list(set(files)) # Count number of instances in failed_tests a__ : Union[str, Any] = [] for file in individual_files: table.append([file, len(filesafailed[file])]) a__ : int = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: a__ : Any = 'Too many failed tests, please see the full report in the Action results.' a__ : int = len(err) + 1_0 a__ : Optional[Any] = message[: 3_0_0_0 - offset] + F"""\n...\n```\n{err}""" print(F"""### {message}""") else: a__ : int = 'No failed tests! 🤗' print(F"""## {message}""") payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient a__ : List[str] = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": a__ : str = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) a__ : Tuple = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } payload.append(action_button) a__ : str = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""", } ], } payload.append(date_report) a__ : List[str] = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) a__ : List[str] = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name a__ : int = '' for i, row in enumerate(test_failures): if row[0] != test_class: a__ : List[str] = row[0] else: a__ : List[Any] = '' a__ : str = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""", }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
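The report builder above leans on tabulate with a custom TableFormat; a minimal sketch of the same call with a stock format and toy data, just to show the shape of the failed-test table it posts:

# Toy version of the failed-test table; "github" stands in for the custom hf_table_format.
from tabulate import tabulate

rows = [["tests/test_foo.py", 2], ["tests/test_bar.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt="github"))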
80
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: if not isinstance(__lowerCamelCase , __lowerCamelCase ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowercase__ : Tuple = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
16
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ : Dict = logging.get_logger(__name__) lowerCamelCase_ : List[str] = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "blip_2_vision_model" def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-1_0 , __A=True , **__A , ) -> Any: super().__init__(**__A ) a =hidden_size a =intermediate_size a =num_hidden_layers a =num_attention_heads a =patch_size a =image_size a =initializer_range a =attention_dropout a =layer_norm_eps a =hidden_act a =qkv_bias @classmethod def SCREAMING_SNAKE_CASE ( cls , __A , **__A ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) a , a =cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": a =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "blip_2_qformer" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-1_2 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> Dict: super().__init__(pad_token_id=__A , **__A ) a =vocab_size a =hidden_size a =num_hidden_layers a =num_attention_heads a =hidden_act a =intermediate_size a =hidden_dropout_prob a =attention_probs_dropout_prob a =max_position_embeddings a =initializer_range a =layer_norm_eps a =position_embedding_type a =cross_attention_frequency a =encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE ( cls , __A , **__A ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) a , a =cls.get_config_dict(__A , **__A ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": a =config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "blip-2" __lowerCAmelCase = True def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int: super().__init__(**__A ) if vision_config is None: a ={} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' ) if qformer_config is None: a ={} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' ) if text_config is None: a ={} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''' ) a =BlipaVisionConfig(**__A ) a =BlipaQFormerConfig(**__A ) a =text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' a =CONFIG_MAPPING[text_model_type](**__A ) a =self.text_config.tie_word_embeddings a =self.text_config.is_encoder_decoder a =num_query_tokens a =self.vision_config.hidden_size a =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES a =1.0 a =0.02 @classmethod def SCREAMING_SNAKE_CASE ( cls , __A , __A , __A , **__A , ) -> Any: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: a =copy.deepcopy(self.__dict__ ) a =self.vision_config.to_dict() a =self.qformer_config.to_dict() a =self.text_config.to_dict() a =self.__class__.model_type return output
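A hedged sketch of composing the three sub-configs into a full BLIP-2 config, mirroring the classmethod above; the defaults shown are assumptions rather than values pinned by this file:

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config(
    vision_config=Blip2VisionConfig().to_dict(),
    qformer_config=Blip2QFormerConfig().to_dict(),
    text_config=OPTConfig().to_dict(),  # "opt" is the fallback text model type above
    num_query_tokens=32,
)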
81
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str: """simple docstring""" lowercase__ : int = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Optional[Any] = embeddings_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : str = depths lowercase__ : Tuple = is_training lowercase__ : List[Any] = use_labels lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Optional[Any] = len(_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) lowercase__ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = TFResNetModel(config=_snake_case ) lowercase__ : List[str] = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.num_labels lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case ) lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : Dict = {'''pixel_values''': 
pixel_values} return config, inputs_dict @require_tf class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : List[Any] = False lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : List[str] = False def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Optional[Any] = TFResNetModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[int] = [*signature.parameters.keys()] lowercase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ): lowercase__ : str = model_class(_snake_case ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : List[Any] = layer_type lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Any: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : Any = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' ) # forward pass lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
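The integration test above resolves its checkpoint from TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]; a sketch of the same flow with an explicit name, assuming that entry is the standard ResNet-50 checkpoint:

from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test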
16
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowerCAmelCase = CLIPTextModel(_snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def snake_case ( self , _snake_case , _snake_case=0 ): """simple docstring""" _lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert("""RGB""" ) if str(_snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(_snake_case ) else: _lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = 
StableDiffusionInstructPixaPixPipeline(**_snake_case ) _lowerCAmelCase = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = sd_pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case ) _lowerCAmelCase = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = """french fries""" _lowerCAmelCase = sd_pipe(**_snake_case , negative_prompt=_snake_case ) _lowerCAmelCase = output.images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case ) _lowerCAmelCase = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = [inputs["""prompt"""]] * 2 _lowerCAmelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0 _lowerCAmelCase = torch.from_numpy(_snake_case ).unsqueeze(0 ).to(_snake_case ) _lowerCAmelCase = image / 2 + 0.5 _lowerCAmelCase = image.permute(0 , 3 , 1 , 2 ) _lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 ) _lowerCAmelCase = sd_pipe(**_snake_case ).images _lowerCAmelCase = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) _lowerCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case ) _lowerCAmelCase = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = sd_pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = [round(_snake_case , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(_snake_case ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) _lowerCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = 
StableDiffusionInstructPixaPixPipeline(**_snake_case ) _lowerCAmelCase = VaeImageProcessor(do_resize=_snake_case , do_normalize=_snake_case ) _lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" ) )[0] _lowerCAmelCase = components["""vae"""] _lowerCAmelCase = self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): _lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode() _lowerCAmelCase = pipe(**_snake_case )[0] _lowerCAmelCase = np.abs(out - out_latents_inputs ).max() self.assertLess(_snake_case , 1e-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , _snake_case=0 ): """simple docstring""" _lowerCAmelCase = torch.manual_seed(_snake_case ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) _lowerCAmelCase = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def snake_case ( self ): """simple docstring""" _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = self.get_inputs() _lowerCAmelCase = pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case ) _lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = self.get_inputs() _lowerCAmelCase = pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case ) _lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = self.get_inputs() _lowerCAmelCase = pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def snake_case ( self ): 
"""simple docstring""" _lowerCAmelCase = 0 def callback_fn(_snake_case , _snake_case , _snake_case ) -> None: _lowerCAmelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _lowerCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _lowerCAmelCase = latents[0, -3:, -3:, -1] _lowerCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: _lowerCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _lowerCAmelCase = latents[0, -3:, -3:, -1] _lowerCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 _lowerCAmelCase = False _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.floataa ) _lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = self.get_inputs() pipe(**_snake_case , callback=_snake_case , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def snake_case ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.floataa ) _lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase = self.get_inputs() _lowerCAmelCase = pipe(**_snake_case ) _lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 _lowerCAmelCase = inputs["""image"""].resize((504, 504) ) _lowerCAmelCase = """timbrooks/instruct-pix2pix""" _lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( _snake_case , safety_checker=_snake_case , ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = pipe(**_snake_case ) _lowerCAmelCase = output.images[0] _lowerCAmelCase = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) _lowerCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
82
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: if "model" in orig_key: lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase__ : str = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase__ : Optional[Any] = '''yoso.''' + orig_key return orig_key def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: for key in orig_state_dict.copy().keys(): lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase__ : Tuple = val lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase ) lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase ) lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) lowerCAmelCase_ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
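A hypothetical command line for the converter above; the script filename and all three paths are placeholders:

# python convert_yoso_checkpoint.py \
#     --pytorch_model_path ./yoso_checkpoint.ckpt \
#     --config_file ./yoso_config.json \
#     --pytorch_dump_path ./yoso-hf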
16
0
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : Dict = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) snake_case_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def A__ ( UpperCAmelCase_ ): for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in 
extractors: _UpperCamelCase : Any = model_type_to_module_name(UpperCAmelCase_ ) _UpperCamelCase : List[str] = importlib.import_module(f'.{module_name}' , 'transformers.models' ) try: return getattr(UpperCAmelCase_ , UpperCAmelCase_ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(UpperCAmelCase_ , '__name__' , UpperCAmelCase_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _UpperCamelCase : List[Any] = importlib.import_module('transformers' ) if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ): return getattr(UpperCAmelCase_ , UpperCAmelCase_ ) return None def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , **UpperCAmelCase_ , ): _UpperCamelCase : Optional[Any] = get_file_from_repo( UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(UpperCAmelCase_ , encoding='utf-8' ) as reader: return json.load(UpperCAmelCase_ ) class lowercase__ : def __init__( self : Tuple ): '''simple docstring''' raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(lowerCamelCase__ ) def UpperCamelCase_ ( cls : Tuple ,lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Tuple ): '''simple docstring''' _UpperCamelCase : int = kwargs.pop('config' ,lowerCamelCase__ ) _UpperCamelCase : List[Any] = kwargs.pop('trust_remote_code' ,lowerCamelCase__ ) _UpperCamelCase : Tuple = True _UpperCamelCase , _UpperCamelCase : Dict = ImageProcessingMixin.get_image_processor_dict(lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : List[Any] = config_dict.get('image_processor_type' ,lowerCamelCase__ ) _UpperCamelCase : Tuple = None if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ): _UpperCamelCase : Optional[int] = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _UpperCamelCase : Optional[Any] = config_dict.pop('feature_extractor_type' ,lowerCamelCase__ ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' 
) _UpperCamelCase : int = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ): _UpperCamelCase : List[Any] = config_dict['auto_map']['AutoFeatureExtractor'] _UpperCamelCase : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : int = AutoConfig.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ ) # It could be in `config.image_processor_type`` _UpperCamelCase : List[Any] = getattr(lowerCamelCase__ ,'image_processor_type' ,lowerCamelCase__ ) if hasattr(lowerCamelCase__ ,'auto_map' ) and "AutoImageProcessor" in config.auto_map: _UpperCamelCase : List[str] = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: _UpperCamelCase : Union[str, Any] = image_processor_class_from_name(lowerCamelCase__ ) _UpperCamelCase : str = image_processor_auto_map is not None _UpperCamelCase : Any = image_processor_class is not None or type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING _UpperCamelCase : Any = resolve_trust_remote_code( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) if has_remote_code and trust_remote_code: _UpperCamelCase : str = get_class_from_dynamic_module( lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = kwargs.pop('code_revision' ,lowerCamelCase__ ) if os.path.isdir(lowerCamelCase__ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) elif image_processor_class is not None: return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING: _UpperCamelCase : int = IMAGE_PROCESSOR_MAPPING[type(lowerCamelCase__ )] return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) raise ValueError( F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ' F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' ) @staticmethod def UpperCamelCase_ ( lowerCamelCase__ : List[str] ,lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' IMAGE_PROCESSOR_MAPPING.register(lowerCamelCase__ ,lowerCamelCase__ )
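In practice the resolver above is reached through a single call; a typical use, plus the registration hook for a custom processor (the custom names are illustrative):

from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# AutoImageProcessor.register(MyConfig, MyImageProcessor)  # custom pairs, via the staticmethod above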
83
"""simple docstring""" import os def __UpperCAmelCase ( ) -> int: with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowercase__ : List[Any] = str(file.readlines()[0] ) lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowercase__ : int = 0 lowercase__ : Optional[Any] = 0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowercase__ : List[str] = 0 return total_score if __name__ == "__main__": print(solution())
16
0
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self ) -> Tuple: lowerCAmelCase_ :Dict = [] def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> List[str]: self.events.append("""on_init_end""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Any: self.events.append("""on_train_begin""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Tuple: self.events.append("""on_train_end""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Any: self.events.append("""on_epoch_begin""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Optional[Any]: self.events.append("""on_epoch_end""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Optional[Any]: self.events.append("""on_step_begin""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Any: self.events.append("""on_step_end""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Any: self.events.append("""on_evaluate""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> int: self.events.append("""on_predict""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Union[str, Any]: self.events.append("""on_save""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Dict: self.events.append("""on_log""" ) def __lowerCAmelCase ( self , __A , __A , __A , **__A ) -> Optional[Any]: self.events.append("""on_prediction_step""" ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = tempfile.mkdtemp() def __lowerCAmelCase ( self ) -> Union[str, Any]: shutil.rmtree(self.output_dir ) def __lowerCAmelCase ( self , __A=0 , __A=0 , __A=64 , __A=64 , __A=None , __A=False , **__A ) -> Optional[int]: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowerCAmelCase_ :Optional[Any] = RegressionDataset(length=__A ) lowerCAmelCase_ :str = RegressionDataset(length=__A ) lowerCAmelCase_ :Dict = RegressionModelConfig(a=__A , b=__A ) lowerCAmelCase_ :Dict = RegressionPreTrainedModel(__A ) lowerCAmelCase_ :Any = TrainingArguments(self.output_dir , disable_tqdm=__A , report_to=[] , **__A ) return Trainer( __A , __A , train_dataset=__A , eval_dataset=__A , callbacks=__A , ) def __lowerCAmelCase ( self , __A , __A ) -> List[Any]: self.assertEqual(len(__A ) , len(__A ) ) # Order doesn't matter lowerCAmelCase_ :Optional[int] = sorted(__A , key=lambda __A : cb.__name__ if isinstance(__A , __A ) else cb.__class__.__name__ ) lowerCAmelCase_ :List[Any] = sorted(__A , key=lambda __A : cb.__name__ if isinstance(__A , __A ) else cb.__class__.__name__ ) for cba, cba in zip(__A , __A ): if isinstance(__A , __A ) and isinstance(__A , __A ): self.assertEqual(__A , __A ) elif isinstance(__A , __A ) and not isinstance(__A , __A ): self.assertEqual(__A , cba.__class__ ) elif not isinstance(__A , __A ) and isinstance(__A , __A ): self.assertEqual(cba.__class__ , __A ) else: self.assertEqual(__A , __A ) def __lowerCAmelCase ( self , __A ) -> int: lowerCAmelCase_ :Optional[int] = ["""on_init_end""", """on_train_begin"""] lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :Optional[Any] = len(trainer.get_eval_dataloader() ) lowerCAmelCase_ :List[Any] = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(__A ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Union[str, Any] = self.get_trainer() lowerCAmelCase_ :Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) # Callbacks passed at init are added to the default callbacks lowerCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(__A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowerCAmelCase_ :List[Any] = self.get_trainer(disable_tqdm=__A ) lowerCAmelCase_ :Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowerCAmelCase_ :Optional[int] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__A ) expected_callbacks.remove(__A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) lowerCAmelCase_ :int = self.get_trainer() lowerCAmelCase_ :Tuple = trainer.pop_callback(__A ) self.assertEqual(cb.__class__ , __A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) 
trainer.add_callback(__A ) expected_callbacks.insert(0 , __A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) # We can also add, pop, or remove by instance lowerCAmelCase_ :str = self.get_trainer() lowerCAmelCase_ :Optional[int] = trainer.callback_handler.callbacks[0] trainer.remove_callback(__A ) expected_callbacks.remove(__A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) lowerCAmelCase_ :int = self.get_trainer() lowerCAmelCase_ :Tuple = trainer.callback_handler.callbacks[0] lowerCAmelCase_ :Dict = trainer.pop_callback(__A ) self.assertEqual(__A , __A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) trainer.add_callback(__A ) expected_callbacks.insert(0 , __A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A ) def __lowerCAmelCase ( self ) -> Optional[Any]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=__A ) lowerCAmelCase_ :Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowerCAmelCase_ :Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) # Independent log/save/eval lowerCAmelCase_ :Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowerCAmelCase_ :Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) lowerCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowerCAmelCase_ :Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) lowerCAmelCase_ :Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() lowerCAmelCase_ :List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) lowerCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() lowerCAmelCase_ :Optional[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) # A bit of everything lowerCAmelCase_ :Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() lowerCAmelCase_ :Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: lowerCAmelCase_ :Optional[int] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(__A ) in warn_mock.call_args[0][0]
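The recorder above implements every hook; user callbacks usually override just one. A minimal sketch:

from transformers import TrainerCallback

class PrintEveryTenSteps(TrainerCallback):
    # Fires after each optimizer step; state.global_step tracks training progress.
    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step % 10 == 0:
            print(f"reached step {state.global_step}")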
84
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(A_ ) class __A ( A_ ): '''simple docstring''' def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]: """simple docstring""" super().__init__(**_snake_case ) requires_backends(self ,'''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]: """simple docstring""" return super().__call__(_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = {} if "candidate_labels" in kwargs: lowercase__ : Any = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase__ : Optional[Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]: """simple docstring""" lowercase__ : List[Any] = load_image(_snake_case ) lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework ) lowercase__ : str = candidate_labels lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels] lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case ) lowercase__ : Optional[int] = [text_inputs] return inputs def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' ) lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] ,_snake_case ): lowercase__ : List[str] = text_inputs[0] else: # Batching case. 
lowercase__ : int = text_inputs[0][0] lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case ) lowercase__ : Union[str, Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Dict = model_outputs.pop('''candidate_labels''' ) lowercase__ : Optional[Any] = model_outputs['''logits'''][0] if self.framework == "pt": lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) lowercase__ : Tuple = probs.tolist() if not isinstance(_snake_case ,_snake_case ): lowercase__ : Any = [scores] elif self.framework == "tf": lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 ) lowercase__ : Optional[Any] = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) lowercase__ : Union[str, Any] = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda _snake_case : -x[0] ) ] return result
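A typical call into this pipeline; the checkpoint name and image path are placeholders:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("path/to/image.png", candidate_labels=["animals", "humans", "landscape"])
# -> [{"score": ..., "label": ...}, ...] sorted by descending score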
16
0
import re


def indian_phone_validator(phone: str) -> bool:
    # Accepts an optional "+91" (with optional dash or space), an optional leading
    # "0" or "91", then a 10-digit mobile number starting with 7, 8 or 9.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
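A few sample inputs against the pattern above:

print(indian_phone_validator("+918827897895"))  # True  ("+91" prefix form)
print(indian_phone_validator("9876543210"))     # True  (bare 10-digit mobile)
print(indian_phone_validator("123456789"))      # False (must start with 7, 8 or 9)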
85
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): lowercase__ : List[str] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__lowerCamelCase ): # looping through rows of graph array for i in range(__lowerCamelCase ): # looping through columns of graph array for j in range(__lowerCamelCase ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowercase__ : str = dist[i][k] + dist[k][j] _print_dist(__lowerCamelCase , __lowerCamelCase ) return dist, v if __name__ == "__main__": lowerCAmelCase_ = int(input('Enter number of vertices: ')) lowerCAmelCase_ = int(input('Enter number of edges: ')) lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowerCAmelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowerCAmelCase_ = int(input('Enter source:')) lowerCAmelCase_ = int(input('Enter destination:')) lowerCAmelCase_ = float(input('Enter weight:')) lowerCAmelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
16
0
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float(moles / volume ) * nfactor ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (volume) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
86
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
16
0
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # Sort items by value-to-weight ratio, best first (greedy choice).
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of the sorted weights
    k = bisect(acc, w)  # number of items that fit whole
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
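A short illustrative call with the classic 60/100/120 example: the value-to-weight ratios are 6, 5 and 4, so with capacity 50 the greedy strategy takes the first two items whole (weight 30, value 160) plus two thirds of the third, for a total of 240.0.

print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0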
87
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
16
0
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # A root is only bracketed if the function changes sign on [a, b].
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
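Since the loop halves the bracket until it is narrower than 0.01, the calls above converge to the roots of 10 - x**2, i.e. about +/- 3.1623, within that tolerance. A quick check:

root = bisection(0, 6)
print(abs(root - 10 ** 0.5) < 0.01)  # True: the final bracket width bounds the error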
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = 
AutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : List[Any] = 
AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
16
0
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
89
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int: lowercase__ : int = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
16
0
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
90
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" debug_launcher(test_ops.main )
16
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) SCREAMING_SNAKE_CASE_ : Optional[int] = MaskFormerConfig(backbone_config=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE_ : Optional[Any] = 8_47 SCREAMING_SNAKE_CASE_ : int = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE_ : Optional[int] = 1_50 SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE_ : Any = 1_71 SCREAMING_SNAKE_CASE_ : List[str] = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE_ : List[Any] = 1_33 SCREAMING_SNAKE_CASE_ : List[Any] = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE_ : Dict = 19 SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE_ : int = 65 SCREAMING_SNAKE_CASE_ : List[Any] = '''mapillary-vistas-id2label.json''' SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) ) SCREAMING_SNAKE_CASE_ : Any = {int(__a ): v for k, v in idalabel.items()} return config def _A (__a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', 
f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') ) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') ) # 
Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') ) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') ) for i in range(3 ): 
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') ) rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def _A (__a , __a , __a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = dct.pop(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = val def _A (__a , __a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE_ : Dict = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ : List[str] = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE_ : int = in_proj_bias[: dim] SCREAMING_SNAKE_CASE_ : int = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE_ : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_ : Optional[int] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE_ : str = in_proj_bias[-dim :] # fmt: on def _A (__a , __a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) SCREAMING_SNAKE_CASE_ : Dict = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE_ : List[Any] = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE_ : Dict = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE_ : Dict = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE_ : str = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_ : Any = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE_ : List[Any] = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE_ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE_ : Optional[Any] = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE_ : Any = in_proj_bias[-hidden_size :] # fmt: on def _A () -> torch.Tensor: """simple docstring""" 
SCREAMING_SNAKE_CASE_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def _A (__a , __a , __a , __a = False ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = get_maskformer_config(__a ) # load original state_dict with open(__a , '''rb''' ) as f: SCREAMING_SNAKE_CASE_ : Any = pickle.load(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE_ : Union[str, Any] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_swin_q_k_v(__a , config.backbone_config ) read_in_decoder_q_k_v(__a , __a ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE_ : Optional[int] = torch.from_numpy(__a ) # load 🤗 model SCREAMING_SNAKE_CASE_ : Dict = MaskFormerForInstanceSegmentation(__a ) model.eval() for name, param in model.named_parameters(): print(__a , param.shape ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = model.load_state_dict(__a , strict=__a ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__a ) == 0, f'Unexpected keys: {unexpected_keys}' # verify results SCREAMING_SNAKE_CASE_ : List[Any] = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE_ : Tuple = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE_ : Optional[int] = 6_55_35 else: SCREAMING_SNAKE_CASE_ : int = 2_55 SCREAMING_SNAKE_CASE_ : Any = True if '''ade''' in model_name else False SCREAMING_SNAKE_CASE_ : Optional[int] = MaskFormerImageProcessor(ignore_index=__a , reduce_labels=__a ) SCREAMING_SNAKE_CASE_ : int = image_processor(__a , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__a ) print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE_ : int = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) image_processor.save_pretrained(__a ) if push_to_hub: print('''Pushing model and image processor to the hub...''' ) model.push_to_hub(f'nielsr/{model_name}' ) image_processor.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_maskformer_checkpoint( 
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
0
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available UpperCamelCase__ = logging.getLogger(__name__) @dataclass class a__ : _a : str _a : List[str] _a : Optional[List[str]] @dataclass class a__ : _a : List[int] _a : List[int] _a : Optional[List[int]] = None _a : Optional[List[int]] = None class a__ ( snake_case__ ): _a : Optional[int] = """train""" _a : int = """dev""" _a : Tuple = """test""" class a__ : @staticmethod def __SCREAMING_SNAKE_CASE( _A , _A ): """simple docstring""" raise NotImplementedError @staticmethod def __SCREAMING_SNAKE_CASE( _A ): """simple docstring""" raise NotImplementedError @staticmethod def __SCREAMING_SNAKE_CASE( _A , _A , _A , _A , _A=False , _A="[CLS]" , _A=1 , _A="[SEP]" , _A=False , _A=False , _A=0 , _A=0 , _A=-1_0_0 , _A=0 , _A=True , ): """simple docstring""" __lowerCAmelCase = {label: i for i, label in enumerate(_A )} __lowerCAmelCase = [] for ex_index, example in enumerate(_A ): if ex_index % 1_0_0_0_0 == 0: logger.info("Writing example %d of %d" , _A , len(_A ) ) __lowerCAmelCase = [] __lowerCAmelCase = [] for word, label in zip(example.words , example.labels ): __lowerCAmelCase = tokenizer.tokenize(_A ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_A ) > 0: tokens.extend(_A ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. __lowerCAmelCase = tokenizer.num_special_tokens_to_add() if len(_A ) > max_seq_length - special_tokens_count: __lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)] __lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] __lowerCAmelCase = [sequence_a_segment_id] * len(_A ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: __lowerCAmelCase = [cls_token] + tokens __lowerCAmelCase = [pad_token_label_id] + label_ids __lowerCAmelCase = [cls_token_segment_id] + segment_ids __lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. 
__lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(_A ) # Zero-pad up to the sequence length. __lowerCAmelCase = max_seq_length - len(_A ) if pad_on_left: __lowerCAmelCase = ([pad_token] * padding_length) + input_ids __lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask __lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids __lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length if ex_index < 5: logger.info("*** Example ***" ) logger.info("guid: %s" , example.guid ) logger.info("tokens: %s" , " ".join([str(_A ) for x in tokens] ) ) logger.info("input_ids: %s" , " ".join([str(_A ) for x in input_ids] ) ) logger.info("input_mask: %s" , " ".join([str(_A ) for x in input_mask] ) ) logger.info("segment_ids: %s" , " ".join([str(_A ) for x in segment_ids] ) ) logger.info("label_ids: %s" , " ".join([str(_A ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: __lowerCAmelCase = None features.append( InputFeatures( input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class a__ ( snake_case__ ): _a : List[InputFeatures] _a : int = nn.CrossEntropyLoss().ignore_index def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ): """simple docstring""" __lowerCAmelCase = os.path.join( _A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowerCAmelCase = cached_features_file + ".lock" with FileLock(_A ): if os.path.exists(_A ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __lowerCAmelCase = torch.load(_A ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __lowerCAmelCase = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features , _A ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _A ): """simple docstring""" return self.features[i] if is_tf_available(): import tensorflow as tf class a__ : _a : List[InputFeatures] _a : int = -1_0_0 def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ): """simple docstring""" __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __lowerCAmelCase = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: __lowerCAmelCase = tf.data.Dataset.from_generator( _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , ( {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: __lowerCAmelCase = tf.data.Dataset.from_generator( _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , ( { "input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] ), "token_type_ids": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _A ): """simple docstring""" return self.features[i]
92
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
16
0
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
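The two-pointer search above works because taking log2 makes the condition monotone: p**q * q**p <= N is equivalent to q*log2(p) + p*log2(q) <= log2(N), which is exactly the upper_bound comparison in the inner loop. A tiny check of the identity:

from math import log2

p, q = 2, 3
print(log2(p**q * q**p))          # 6.1699...
print(q * log2(p) + p * log2(q))  # 6.1699... (same value)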
93
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
16
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = XLNetTokenizer SCREAMING_SNAKE_CASE__ = XLNetTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # We have a SentencePiece fixture for testing a :Dict = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = '''<s>''' a :Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(_lowerCamelCase ) , 1006 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) a :Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] ) a :Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a :Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) a :int = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) a :Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', 
SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) a :Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :int = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) a :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase ) a :Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase ) a :Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) a :Any = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def SCREAMING_SNAKE_CASE__ ( self ): # fmt: off a :Dict = {'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
94
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
16
0
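The asserts in the XLNet test above pin down the model's special-token layout: sep id 4 and cls id 3 are appended at the end of the sequence. A minimal self-contained sketch of that layout, with the ids hard-coded from the asserts rather than taken from a real tokenizer:

SEP_ID, CLS_ID = 4, 3  # ids taken from the asserts in the test above

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    # XLNet appends special tokens at the end: A <sep> <cls>, or A <sep> B <sep> <cls>
    if ids_b is None:
        return ids_a + [SEP_ID, CLS_ID]
    return ids_a + [SEP_ID] + ids_b + [SEP_ID, CLS_ID]

assert build_inputs_with_special_tokens([17, 21442]) == [17, 21442, 4, 3]
assert build_inputs_with_special_tokens([17], [10]) == [17, 4, 10, 4, 3]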
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Deprecation shim: warn once at construction, then behave exactly like
        # the replacement image processor.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
95
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]: """simple docstring""" lowercase__ : Dict = parent lowercase__ : Any = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : int = num_channels lowercase__ : Any = embed_dim lowercase__ : int = depths lowercase__ : Dict = num_heads lowercase__ : List[Any] = window_size lowercase__ : int = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = drop_path_rate lowercase__ : int = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : Tuple = patch_norm lowercase__ : Tuple = layer_norm_eps lowercase__ : Optional[Any] = initializer_range lowercase__ : int = is_training lowercase__ : Optional[int] = scope lowercase__ : str = use_labels lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = encoder_stride def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps 
,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Optional[int] = 1 lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.type_sequence_label_size lowercase__ : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCAmelCase : Optional[int] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : Dict = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[Any] = SwinvaModelTester(self ) lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = True for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Dict = outputs.attentions lowercase__ : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) ,_snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any] = True lowercase__ : Optional[Any] = config.window_size**2 lowercase__ : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowercase__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : Tuple = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) if hasattr(self.model_tester ,'''num_hidden_states_types''' ): lowercase__ : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowercase__ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) ) lowercase__ : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCAmelCase ( 
self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # Swinv2 has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape lowercase__ : int = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Any ) -> List[str]: """simple docstring""" lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
16
0
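The VideoMAEFeatureExtractor file above is an instance of a common deprecation-shim pattern: subclass the replacement, warn once at construction, then delegate everything. A generic sketch of that pattern; the class names here are placeholders, not real Transformers classes:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias: warns on construction, otherwise behaves like the new class."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=256)  # emits a FutureWarning, then works as usual
assert extractor.size == 256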
"""simple docstring""" def _snake_case ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(lowercase__ , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F"{solution() = }")
96
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
16
0
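A quick cross-check of the Problem 9 answer computed by the list comprehension above: brute force confirms the only Pythagorean triplet with a + b + c == 1000 is (200, 375, 425), so the product is 31875000.

def triplet_product(perimeter=1000):
    # exhaustive search over a <= b, with c fixed by the perimeter constraint
    for a in range(1, perimeter):
        for b in range(a, perimeter - a):
            c = perimeter - a - b
            if a * a + b * b == c * c:
                return a * b * c
    return None

assert triplet_product() == 200 * 375 * 425  # 31875000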
'''simple docstring'''


def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row using the additive recurrence."""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0'
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build the triangle computing only the first half of each row, then mirroring it."""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0'
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
97
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase_ = { 'facebook/xglm-564M': 2_048, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None: """simple docstring""" lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Optional[int] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_snake_case ) lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Optional[int]: """simple docstring""" lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Optional[int] = None lowercase__ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any: """simple docstring""" lowercase__ : int = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : Dict = {} lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" 
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip() return out_string def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
16
0
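As a sanity check on the triangle builders above (my addition, not part of the original file): row n of Pascal's triangle is exactly the binomial coefficients C(n, k), so both generators can be validated against math.comb.

from math import comb

def pascal_row(n):
    # row n of Pascal's triangle as binomial coefficients
    return [comb(n, k) for k in range(n + 1)]

assert pascal_row(4) == [1, 4, 6, 4, 1]
# With the file above imported, each generator should agree row by row, e.g.:
# assert generate_pascal_triangle(5)[4] == pascal_row(4)
# assert generate_pascal_triangle_optimized(5)[4] == pascal_row(4)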
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case : """simple docstring""" def __init__( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int=sys.maxsize ): UpperCAmelCase__ = 'bilinear' UpperCAmelCase__ = max_size UpperCAmelCase__ = short_edge_length def __call__( self : List[Any] ,lowerCamelCase__ : str ): UpperCAmelCase__ = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ = size * 1.0 / min(lowerCamelCase__ ,lowerCamelCase__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ = scale * h, size if max(lowerCamelCase__ ,lowerCamelCase__ ) > self.max_size: UpperCAmelCase__ = self.max_size * 1.0 / max(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = newh * scale UpperCAmelCase__ = neww * scale UpperCAmelCase__ = int(neww + 0.5 ) UpperCAmelCase__ = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ = Image.fromarray(lowerCamelCase__ ) UpperCAmelCase__ = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR ) UpperCAmelCase__ = np.asarray(lowerCamelCase__ ) else: UpperCAmelCase__ = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ = nn.functional.interpolate( lowerCamelCase__ ,(newh, neww) ,mode=self.interp_method ,align_corners=lowerCamelCase__ ).squeeze(0 ) img_augs.append(lowerCamelCase__ ) return img_augs class snake_case : """simple docstring""" def __init__( self : List[str] ,lowerCamelCase__ : Tuple ): UpperCAmelCase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ = cfg.INPUT.FORMAT UpperCAmelCase__ = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ = cfg.PAD_VALUE UpperCAmelCase__ = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ = cfg.MODEL.DEVICE UpperCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) UpperCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) UpperCAmelCase__ = lambda lowerCamelCase__ : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Any ): UpperCAmelCase__ = tuple(max(lowerCamelCase__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ = [im.shape[-2:] for im in images] UpperCAmelCase__ = [ nn.functional.pad( lowerCamelCase__ ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,) for size, im in zip(lowerCamelCase__ ,lowerCamelCase__ ) ] return torch.stack(lowerCamelCase__ ), torch.tensor(lowerCamelCase__ ) def __call__( self : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : str=False ): with torch.no_grad(): if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): UpperCAmelCase__ = [images] if single_image: assert len(lowerCamelCase__ ) == 1 for i in range(len(lowerCamelCase__ ) ): if isinstance(images[i] ,torch.Tensor ): images.insert(lowerCamelCase__ ,images.pop(lowerCamelCase__ ).to(self.device ).float() ) elif not isinstance(images[i] ,torch.Tensor ): images.insert( lowerCamelCase__ ,torch.as_tensor(img_tensorize(images.pop(lowerCamelCase__ ) ,input_format=self.input_format 
) ) .to(self.device ) .float() ,) # resize smallest edge UpperCAmelCase__ = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ = self.aug(lowerCamelCase__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ = [self.normalizer(lowerCamelCase__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ = self.pad(lowerCamelCase__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ = torch.true_divide(lowerCamelCase__ ,lowerCamelCase__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def a_ ( lowerCamelCase , lowerCamelCase ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def a_ ( lowerCamelCase , lowerCamelCase ): assert torch.isfinite(lowerCamelCase ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ = box_size tensor[:, 0].clamp_(min=0 , max=lowerCamelCase ) tensor[:, 1].clamp_(min=0 , max=lowerCamelCase ) tensor[:, 2].clamp_(min=0 , max=lowerCamelCase ) tensor[:, 3].clamp_(min=0 , max=lowerCamelCase )
98
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]: print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase ) else: lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase ) if hidden_sizes == 1_92: lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase ) if hidden_sizes == 2_56: lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase ) if hidden_sizes == 3_84: lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase ) from_model.eval() lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval() lowercase__ : str = OrderedDict() lowercase__ : int = from_model.state_dict() lowercase__ : Dict = list(from_model.state_dict().keys() ) lowercase__ : Any = list(our_model.state_dict().keys() ) print(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for i in range(len(__lowerCamelCase ) ): lowercase__ : str = weights[og_keys[i]] our_model.load_state_dict(__lowerCamelCase ) lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) ) lowercase__ : Optional[int] = from_model(__lowerCamelCase ) lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." 
lowercase__ : Any = name print(__lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase__ : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]: lowercase__ : Any = '''imagenet-1k-id2label.json''' lowercase__ : Tuple = 10_00 lowercase__ : Dict = (1, num_labels) lowercase__ : List[str] = '''huggingface/label-files''' lowercase__ : str = num_labels lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Union[str, Any] = idalabel lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) lowercase__ : Tuple = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } lowercase__ : Any = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
16
0
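The core of ResizeShortestEdge above is plain arithmetic: scale so the short side hits the target, then cap the long side. A pure-Python sketch of just that arithmetic; this is a hypothetical helper for illustration, not part of the file:

def shortest_edge_resize(h, w, size, max_size):
    # scale so the shorter side becomes `size`
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # cap the longer side at `max_size`, rescaling both dimensions
    if max(newh, neww) > max_size:
        cap = max_size / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_resize(480, 640, size=800, max_size=1333) == (800, 1067)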
def solution(min_total: int = 10**12) -> int:
    """
    Return the number of blue discs for the first arrangement whose total
    number of discs exceeds ``min_total``.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
99
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __A : '''simple docstring''' lowerCAmelCase : List[str] lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ ) def __call__( self : List[str] ) -> Any: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __A : '''simple docstring''' lowerCAmelCase : Optional[List] = None lowerCAmelCase : Optional[int] = None lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None lowercase__ : Dict = len(self.languages ) if self.languages else None def __call__( self : List[Any] ) -> List[Any]: """simple docstring""" return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int: """simple docstring""" lowercase__ : List[Any] = set(self.languages ) if self.languages and set(_snake_case ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowercase__ : str = [] for lang, text in translation_dict.items(): if isinstance(_snake_case ,_snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) ) return {"language": languages, "translation": translations} def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
16
0
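A sanity check (my addition) for the recurrence in the Problem 100 solution above: an arrangement of b blue discs out of n total qualifies when b/n * (b-1)/(n-1) == 1/2, and the recurrence steps from the known (15, 21) arrangement to (85, 120).

from fractions import Fraction

def p_two_blue(blue, total):
    # probability of drawing two blue discs without replacement
    return Fraction(blue, total) * Fraction(blue - 1, total - 1)

assert p_two_blue(15, 21) == Fraction(1, 2)   # the arrangement given in the problem
assert p_two_blue(85, 120) == Fraction(1, 2)  # the next arrangement the recurrence yields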
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __magic_name__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __SCREAMING_SNAKE_CASE = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) __SCREAMING_SNAKE_CASE = value else: __SCREAMING_SNAKE_CASE = value return new_state_dict def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias[:256] __SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias[:256] __SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention __SCREAMING_SNAKE_CASE = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) 
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:256] __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[256:512] __SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-256:] def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.size __SCREAMING_SNAKE_CASE = max(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 800 if """detection""" in checkpoint_url else 1000 __SCREAMING_SNAKE_CASE = target_max_size / current_max_size __SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = F.to_tensor(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = F.normalize(UpperCamelCase_ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): logger.info("""Converting model...""" ) # load original state dict __SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __SCREAMING_SNAKE_CASE = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val # create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: __SCREAMING_SNAKE_CASE = 15 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = {0: """table""", 1: """table rotated"""} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} else: __SCREAMING_SNAKE_CASE = 125 __SCREAMING_SNAKE_CASE = 6 __SCREAMING_SNAKE_CASE = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = DetrImageProcessor( format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 ) __SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # verify our conversion __SCREAMING_SNAKE_CASE = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , 
filename=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = Image.open(UpperCamelCase_ ).convert("""RGB""" ) __SCREAMING_SNAKE_CASE = normalize(resize(UpperCamelCase_ , UpperCamelCase_ ) ).unsqueeze(0 ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) if "detection" in checkpoint_url: __SCREAMING_SNAKE_CASE = (1, 15, 3) __SCREAMING_SNAKE_CASE = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: __SCREAMING_SNAKE_CASE = (1, 125, 7) __SCREAMING_SNAKE_CASE = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) __SCREAMING_SNAKE_CASE = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(UpperCamelCase_ ) image_processor.push_to_hub(UpperCamelCase_ ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __magic_name__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
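In the q/k/v read-in above, the hard-coded 256 slices come from PyTorch's nn.MultiheadAttention, which stores the three projections as one fused (3*hidden, hidden) matrix plus a fused bias. A minimal sketch of that split follows; the function name and the explicit hidden_size parameter are ours for illustration, the script above simply hard-codes 256.

import torch


def split_in_proj(in_proj_weight, in_proj_bias, hidden_size=256):
    # rows [0:h) are the query projection, [h:2h) the key, [2h:3h) the value
    q_w = in_proj_weight[:hidden_size, :]
    k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_w = in_proj_weight[-hidden_size:, :]
    q_b = in_proj_bias[:hidden_size]
    k_b = in_proj_bias[hidden_size : 2 * hidden_size]
    v_b = in_proj_bias[-hidden_size:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


if __name__ == "__main__":
    (q_w, q_b), _, _ = split_in_proj(torch.randn(768, 256), torch.randn(768))
    assert q_w.shape == (256, 256) and q_b.shape == (256,)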
100
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]: lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase__ : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : str = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Optional[int] = 16 elif accelerator.mixed_precision != "no": lowercase__ : List[Any] = 8 else: lowercase__ : int = None return tokenizer.pad( __lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) lowercase__ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1": lowercase__ : List[Any] = 2 # Initialize accelerator lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : str = config['''lr'''] lowercase__ : str = int(config['''num_epochs'''] ) lowercase__ : Optional[int] = int(config['''seed'''] ) lowercase__ : Tuple = int(config['''batch_size'''] ) lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__lowerCamelCase ) def inner_training_loop(__lowerCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Tuple = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase ) lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase ): model.train() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ : Dict = model(**__lowerCamelCase ) lowercase__ : List[Any] = outputs.loss accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Tuple = model(**__lowerCamelCase ) lowercase__ : Any = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCamelCase , references=__lowerCamelCase , ) lowercase__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ) -> Dict: lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase__ : int = parser.parse_args() lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
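A stripped-down illustration of the contract of the find_executable_batch_size decorator used above: the wrapped function is called with no arguments, the decorator supplies the batch size, and on an out-of-memory error the call is retried with the batch size halved. This is a sketch only; the real training loop lives in the script above.

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def inner_loop(batch_size):
    # The first attempt runs with batch_size=128; a CUDA out-of-memory error
    # triggers a retry at 64, then 32, and so on until an attempt succeeds.
    print(f"attempting batch size {batch_size}")


inner_loop()  # called without arguments, the decorator injects batch_size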
16
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ :str = logging.get_logger(__name__) # TODO Update this lowercase__ :List[str] = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Union[str, Any] ='''esm''' def __init__( self ,A__=None ,A__=None ,A__=None ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__=0.1 ,A__=0.1 ,A__=1_0_2_6 ,A__=0.02 ,A__=1E-12 ,A__="absolute" ,A__=True ,A__=None ,A__=False ,A__=False ,A__=None ,A__=None ,**A__ ,): super().__init__(pad_token_id=A__ ,mask_token_id=A__ ,**A__) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = initializer_range lowercase = layer_norm_eps lowercase = position_embedding_type lowercase = use_cache lowercase = emb_layer_norm_before lowercase = token_dropout lowercase = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''') lowercase = EsmFoldConfig() elif isinstance(A__ ,A__): lowercase = EsmFoldConfig(**A__) lowercase = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''') lowercase = get_default_vocab_list() else: lowercase = vocab_list else: lowercase = None lowercase = None if self.esmfold_config is not None and getattr(self.esmfold_config ,'''use_esm_attn_map''' ,A__): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''') def A__ ( self): lowercase = super().to_dict() if isinstance(self.esmfold_config ,A__): lowercase = self.esmfold_config.to_dict() return output @dataclass class lowercase : lowercase_ : str =None lowercase_ : bool =True lowercase_ : bool =False lowercase_ : bool =False lowercase_ : bool =False lowercase_ : float =0 lowercase_ : bool =True lowercase_ : bool =False lowercase_ : int =128 lowercase_ : "TrunkConfig" =None def A__ ( self): if self.trunk is None: lowercase = TrunkConfig() elif isinstance(self.trunk ,A__): lowercase = TrunkConfig(**self.trunk) def A__ ( self): lowercase = asdict(self) lowercase = self.trunk.to_dict() return output @dataclass class lowercase : lowercase_ : int =48 lowercase_ : int =1024 lowercase_ : int =128 lowercase_ : int =32 lowercase_ : int =32 lowercase_ : int =32 lowercase_ : float =0 lowercase_ : float =0 lowercase_ : bool =False lowercase_ : int =4 lowercase_ : Optional[int] =128 lowercase_ : "StructureModuleConfig" =None def A__ ( self): if self.structure_module is None: lowercase = StructureModuleConfig() elif isinstance(self.structure_module ,A__): lowercase = StructureModuleConfig(**self.structure_module) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.') if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f' {self.sequence_state_dim} and {self.sequence_state_dim}.') if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' 
f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.') lowercase = self.sequence_state_dim // self.sequence_head_width lowercase = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got''' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.') if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got''' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.') if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.') if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.') def A__ ( self): lowercase = asdict(self) lowercase = self.structure_module.to_dict() return output @dataclass class lowercase : lowercase_ : int =384 lowercase_ : int =128 lowercase_ : int =16 lowercase_ : int =128 lowercase_ : int =12 lowercase_ : int =4 lowercase_ : int =8 lowercase_ : float =0.1 lowercase_ : int =8 lowercase_ : int =1 lowercase_ : int =2 lowercase_ : int =7 lowercase_ : int =10 lowercase_ : float =1e-8 lowercase_ : float =1e5 def A__ ( self): return asdict(self) def UpperCamelCase ( ): '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
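The TrunkConfig validation above requires each state dimension to factor exactly into num_heads * head_width; with the defaults shown this works out as follows (plain arithmetic, nothing beyond the values in the dataclass):

sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32

sequence_num_heads = sequence_state_dim // sequence_head_width  # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width  # 4 heads

assert sequence_state_dim == sequence_num_heads * sequence_head_width
assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width
assert pairwise_state_dim % 2 == 0  # also enforced above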
101
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __UpperCAmelCase ( __lowerCamelCase ) -> Any: lowercase__ : Optional[int] = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : str = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple: lowercase__ : List[str] = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def __UpperCAmelCase ( ) -> Optional[int]: lowercase__ : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: lowercase__ : List[Any] = '''imagenet-1k-id2label.json''' lowercase__ : Optional[Any] = 10_00 lowercase__ : Optional[Any] = '''huggingface/label-files''' lowercase__ : Dict = num_labels lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : str = {v: k for k, v in idalabel.items()} lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": lowercase__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": lowercase__ : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : List[Any] = [2, 2, 20] lowercase__ : Any = [3, 12, 16] lowercase__ : Tuple = [1_92, 7_68, 10_24] lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase ) lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) lowercase__ : List[str] = image_size lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) ) lowercase__ : int = OrderedDict() lowercase__ : List[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase ) lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase ) for cnt in range(config.depth[idx] ): lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase ) lowercase__ : List[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = 
argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
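The conversion above boils down to a list of (huggingface_key, original_key) pairs plus a weight copy into an OrderedDict. A toy version of that pattern, with made-up key names for illustration:

from collections import OrderedDict

import torch

rename_pairs = [
    ("encoder.layer.0.attn.proj.weight", "stage0.blocks.0.attn.proj.weight"),
    ("encoder.layer.0.attn.proj.bias", "stage0.blocks.0.attn.proj.bias"),
]
original_weights = {
    "stage0.blocks.0.attn.proj.weight": torch.randn(8, 8),
    "stage0.blocks.0.attn.proj.bias": torch.zeros(8),
}
# copy each original tensor under its new HuggingFace-style name
new_state_dict = OrderedDict(
    (hf_key, original_weights[orig_key]) for hf_key, orig_key in rename_pairs
)
assert list(new_state_dict)[0] == "encoder.layer.0.attn.proj.weight"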
16
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='vit_msn' def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-06 , a_=2_24 , a_=16 , a_=3 , a_=True , **a_ , ): '''simple docstring''' super().__init__(**a_ ) __snake_case : Any = hidden_size __snake_case : Optional[Any] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : int = initializer_range __snake_case : int = layer_norm_eps __snake_case : Any = image_size __snake_case : Any = patch_size __snake_case : int = num_channels __snake_case : str = qkv_bias
102
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: if not isinstance(__lowerCamelCase , __lowerCamelCase ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowercase__ : Tuple = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
16
0
from __future__ import annotations


def bucket_sort( my_list : list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets : list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        # each value lands in the bucket for its offset from the minimum
        buckets[int(i - min_value )].append(i )
    # concatenating the per-bucket sorts yields the fully sorted list
    return [v for bucket in buckets for v in sorted(bucket )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
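Because bucket indices are computed as int(value - min_value), the sort also handles floats that span less than one unit (they all share a single bucket and fall back to the built-in sort); for n items spread roughly uniformly over k buckets, the expected running time is O(n + k):

print(bucket_sort([0.4, 0.1, 0.3, 0.2]))  # [0.1, 0.2, 0.3, 0.4]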
103
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str: """simple docstring""" lowercase__ : int = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Optional[Any] = embeddings_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : str = depths lowercase__ : Tuple = is_training lowercase__ : List[Any] = use_labels lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Optional[Any] = len(_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) lowercase__ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = TFResNetModel(config=_snake_case ) lowercase__ : List[str] = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.num_labels lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case ) lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : Dict = {'''pixel_values''': 
pixel_values} return config, inputs_dict @require_tf class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : List[Any] = False lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : List[str] = False def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Optional[Any] = TFResNetModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[int] = [*signature.parameters.keys()] lowercase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ): lowercase__ : str = model_class(_snake_case ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : List[Any] = layer_type lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Any: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : Any = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' ) # forward pass lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
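Outside the test harness, the same integration check can be run as a few lines of standalone inference. The checkpoint name below mirrors TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] and is assumed to be microsoft/resnet-50; network access to the Hub is required.

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])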
16
0
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class lowercase_ :
    """simple docstring"""

    SCREAMING_SNAKE_CASE : Optional[Union[str, Path]] = None
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : Optional[Dict] = None
    SCREAMING_SNAKE_CASE : Optional[str] = None
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : bool = True
    SCREAMING_SNAKE_CASE : Optional[int] = None
    SCREAMING_SNAKE_CASE : int = 1
    SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = None
    SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : Optional[Dict] = None
    SCREAMING_SNAKE_CASE : Optional[str] = None

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # deep-copy every attribute so the returned config shares no mutable state
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
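The copy-style method at the end of the dataclass rebuilds the instance from deep copies of its own __dict__, so mutable fields (the optional dicts) are not shared between copies. The idiom in isolation, on a made-up two-field config:

import copy
from dataclasses import dataclass, field


@dataclass
class TinyConfig:
    max_retries: int = 1
    proxies: dict = field(default_factory=dict)

    def copy(self) -> "TinyConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


a = TinyConfig(proxies={"https": "proxy:8080"})
b = a.copy()
b.proxies["https"] = "other:3128"
assert a.proxies["https"] == "proxy:8080"  # the deep copy kept them independent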
104
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: if "model" in orig_key: lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase__ : str = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase__ : Optional[Any] = '''yoso.''' + orig_key return orig_key def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: for key in orig_state_dict.copy().keys(): lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase__ : Tuple = val lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase ) lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase ) lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) lowerCAmelCase_ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
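Tracing one key through the rename helper above makes the order of the substring rewrites concrete; the key below is illustrative, shaped like the YOSO originals:

key = "model.transformer_3.mha.W_q.weight"
key = key.replace("model.", "")                                   # strip the wrapper prefix
layer_num = key.split(".")[0].split("_")[-1]                      # -> "3"
key = key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
key = key.replace("mha", "attention").replace("W_q", "self.query")
key = "yoso." + key                                               # "cls" is not in the key
print(key)  # yoso.encoder.layer.3.attention.self.query.weight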
16
0
"""simple docstring""" import logging from transformers import PretrainedConfig a : Tuple = logging.getLogger(__name__) a : Dict = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __UpperCamelCase ( a__ ): lowerCamelCase : Optional[Any] ="""bertabs""" def __init__( self , lowerCAmelCase__=3_0522 , lowerCAmelCase__=512 , lowerCAmelCase__=6 , lowerCAmelCase__=512 , lowerCAmelCase__=8 , lowerCAmelCase__=512 , lowerCAmelCase__=0.2 , lowerCAmelCase__=6 , lowerCAmelCase__=768 , lowerCAmelCase__=8 , lowerCAmelCase__=2048 , lowerCAmelCase__=0.2 , **lowerCAmelCase__ , ) -> int: super().__init__(**lowerCAmelCase__ ) a : Dict = vocab_size a : str = max_pos a : str = enc_layers a : int = enc_hidden_size a : Tuple = enc_heads a : Dict = enc_ff_size a : List[Any] = enc_dropout a : str = dec_layers a : Dict = dec_hidden_size a : Union[str, Any] = dec_heads a : Union[str, Any] = dec_ff_size a : List[Any] = dec_dropout
105
"""simple docstring""" import os def __UpperCAmelCase ( ) -> int: with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowercase__ : List[Any] = str(file.readlines()[0] ) lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowercase__ : int = 0 lowercase__ : Optional[Any] = 0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowercase__ : List[str] = 0 return total_score if __name__ == "__main__": print(solution())
16
0
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __UpperCamelCase : Dict = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : Any = WavaVecaForSequenceClassification.from_pretrained(A_ , config=A_ ) lowerCAmelCase__ : List[Any] = downstream_dict['''projector.weight'''] lowerCAmelCase__ : Tuple = downstream_dict['''projector.bias'''] lowerCAmelCase__ : Dict = downstream_dict['''model.post_net.linear.weight'''] lowerCAmelCase__ : Optional[int] = downstream_dict['''model.post_net.linear.bias'''] return model def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : List[str] = WavaVecaForAudioFrameClassification.from_pretrained(A_ , config=A_ ) lowerCAmelCase__ : Optional[int] = downstream_dict['''model.linear.weight'''] lowerCAmelCase__ : Tuple = downstream_dict['''model.linear.bias'''] return model def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : List[Any] = WavaVecaForXVector.from_pretrained(A_ , config=A_ ) lowerCAmelCase__ : int = downstream_dict['''connector.weight'''] lowerCAmelCase__ : str = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowerCAmelCase__ : List[str] = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] lowerCAmelCase__ : List[str] = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] lowerCAmelCase__ : str = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] lowerCAmelCase__ : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] lowerCAmelCase__ : Any = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] lowerCAmelCase__ : str = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] lowerCAmelCase__ : str = downstream_dict['''objective.W'''] return model @torch.no_grad() def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ : str = torch.load(A_ , map_location='''cpu''' ) lowerCAmelCase__ : int = checkpoint['''Downstream'''] lowerCAmelCase__ : Dict = WavaVecaConfig.from_pretrained(A_ ) lowerCAmelCase__ : int = WavaVecaFeatureExtractor.from_pretrained( A_ , return_attention_mask=A_ , do_normalize=A_ ) lowerCAmelCase__ : Tuple = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): lowerCAmelCase__ : List[Any] = convert_classification(A_ , A_ , A_ ) elif arch.endswith('''ForAudioFrameClassification''' ): lowerCAmelCase__ : Any = convert_diarization(A_ , A_ , A_ ) elif arch.endswith('''ForXVector''' ): lowerCAmelCase__ : Optional[Any] = convert_xvector(A_ , A_ , A_ ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: lowerCAmelCase__ : Optional[Any] = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl 
checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') __UpperCamelCase : List[str] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
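The converter picks its weight-copying routine from the suffix of config.architectures[0]; the dispatch on its own, with an example architecture string:

arch = "Wav2Vec2ForXVector"
if arch.endswith("ForSequenceClassification"):
    converter = "convert_classification"
elif arch.endswith("ForAudioFrameClassification"):
    converter = "convert_diarization"
elif arch.endswith("ForXVector"):
    converter = "convert_xvector"
else:
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
print(converter)  # convert_xvector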
106
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(A_ ) class __A ( A_ ): '''simple docstring''' def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]: """simple docstring""" super().__init__(**_snake_case ) requires_backends(self ,'''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]: """simple docstring""" return super().__call__(_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = {} if "candidate_labels" in kwargs: lowercase__ : Any = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase__ : Optional[Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]: """simple docstring""" lowercase__ : List[Any] = load_image(_snake_case ) lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework ) lowercase__ : str = candidate_labels lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels] lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case ) lowercase__ : Optional[int] = [text_inputs] return inputs def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' ) lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] ,_snake_case ): lowercase__ : List[str] = text_inputs[0] else: # Batching case. 
lowercase__ : int = text_inputs[0][0] lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case ) lowercase__ : Union[str, Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Dict = model_outputs.pop('''candidate_labels''' ) lowercase__ : Optional[Any] = model_outputs['''logits'''][0] if self.framework == "pt": lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) lowercase__ : Tuple = probs.tolist() if not isinstance(_snake_case ,_snake_case ): lowercase__ : Any = [scores] elif self.framework == "tf": lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 ) lowercase__ : Optional[Any] = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) lowercase__ : Union[str, Any] = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda _snake_case : -x[0] ) ] return result
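End to end, the pipeline above is used like this; the CLIP checkpoint is one common public choice, not the only one:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(result[0])  # highest-scoring label first, e.g. {'score': ..., 'label': 'two cats'}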
16
0
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class snake_case__ : """simple docstring""" def __UpperCAmelCase ( self : str , __lowerCamelCase : Dict ) -> List[Any]: raise NotImplementedError() def __UpperCAmelCase ( self : List[str] ) -> List[Any]: raise NotImplementedError() class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , **__lowerCamelCase : str ) -> Any: a = tokenizer a = skip_prompt a = decode_kwargs # variables used in the streaming process a = [] a = 0 a = True def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str ) -> List[Any]: if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1" ) elif len(value.shape ) > 1: a = value[0] if self.skip_prompt and self.next_tokens_are_prompt: a = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) a = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("\n" ): a = text[self.print_len :] a = [] a = 0 # If the last token is a CJK character, we print the characters. elif len(__lowerCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): a = text[self.print_len :] self.print_len += len(__lowerCamelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: a = text[self.print_len : text.rfind(" " ) + 1] self.print_len += len(__lowerCamelCase ) self.on_finalized_text(__lowerCamelCase ) def __UpperCAmelCase ( self : int ) -> int: # Flush the cache, if it exists if len(self.token_cache ) > 0: a = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) a = text[self.print_len :] a = [] a = 0 else: a = "" a = True self.on_finalized_text(__lowerCamelCase , stream_end=__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Dict: print(__lowerCamelCase , flush=__lowerCamelCase , end="" if not stream_end else None ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[Any] ) -> Any: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4e_00 and cp <= 0X9f_ff) or (cp >= 0X34_00 and cp <= 0X4d_bf) # or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) # or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) # or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) # or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) # or (cp >= 0Xf9_00 and cp <= 0Xfa_ff) or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) # ): # return True return False class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[float] = None , **__lowerCamelCase : Any ) -> int: super().__init__(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) a = Queue() a = None a = timeout def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple: self.text_queue.put(__lowerCamelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : Dict ) -> str: return self def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: a = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
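Typical use of the two streamers defined above: TextStreamer prints as tokens arrive, while TextIteratorStreamer feeds decoded chunks through a queue so generation can run on a background thread. The checkpoint name is illustrative.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for new_text in streamer:  # blocks until the next chunk is decoded
    print(new_text, end="")
thread.join()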
107
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): lowercase__ : List[str] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__lowerCamelCase ): # looping through rows of graph array for i in range(__lowerCamelCase ): # looping through columns of graph array for j in range(__lowerCamelCase ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowercase__ : str = dist[i][k] + dist[k][j] _print_dist(__lowerCamelCase , __lowerCamelCase ) return dist, v if __name__ == "__main__": lowerCAmelCase_ = int(input('Enter number of vertices: ')) lowerCAmelCase_ = int(input('Enter number of edges: ')) lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowerCAmelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowerCAmelCase_ = int(input('Enter source:')) lowerCAmelCase_ = int(input('Enter destination:')) lowerCAmelCase_ = float(input('Enter weight:')) lowerCAmelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
16
0
"""simple docstring""" import re def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase : Tuple = re.compile( r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" ) return bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": lowerCAmelCase__ = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
108
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
16
0
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets A: Optional[int] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" A: int = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" A: Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: '''simple docstring''' if rouge_types is None: UpperCAmelCase : int = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] UpperCAmelCase : str = rouge_scorer.RougeScorer(rouge_types=_SCREAMING_SNAKE_CASE , use_stemmer=_SCREAMING_SNAKE_CASE ) if use_aggregator: UpperCAmelCase : Optional[int] = scoring.BootstrapAggregator() else: UpperCAmelCase : Tuple = [] for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase : int = scorer.score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if use_aggregator: aggregator.add_scores(_SCREAMING_SNAKE_CASE ) else: scores.append(_SCREAMING_SNAKE_CASE ) if use_aggregator: UpperCAmelCase : Tuple = aggregator.aggregate() else: UpperCAmelCase : List[Any] = {} for key in scores[0]: UpperCAmelCase : Optional[Any] = [score[key] for score in scores] return result
109
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
16
0
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
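For reference, the slow test above exercises the same public API a user would call directly. A minimal usage sketch (the checkpoint name is taken from the test; the output keys follow the pipeline contract asserted above):

# Sketch: running the depth-estimation pipeline outside the test harness.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

outputs["depth"].save("depth.png")       # PIL image visualization
print(outputs["predicted_depth"].shape)  # raw torch.Tensor depth map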
110
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = 
AutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : List[Any] = 
AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
16
0
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer, and rank.
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure.
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set `data` belongs to, with path compression
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # union by rank: attach the lower-rank tree under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours and edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: build a Minimum Spanning Tree of the graph.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
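A short usage sketch for the Kruskal implementation above, on a three-node triangle where the heaviest edge should be excluded from the tree:

# Hypothetical usage sketch: build a small weighted graph and extract its MST.
g = GraphUndirectedWeighted[str]()
g.add_edge("a", "b", 1)
g.add_edge("b", "c", 2)
g.add_edge("a", "c", 3)  # heaviest edge; dropped because a-c is already connected

mst = g.kruskal()
# The MST keeps n - 1 = 2 edges: (a, b, 1) and (b, c, 2).
print(mst.connections)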
62
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int: lowercase__ : int = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
16
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : Dict = logging.get_logger(__name__) class UpperCamelCase__ ( A_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["pixel_values"] def __init__( self : Any , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ): super().__init__(**_snake_case ) lowerCAmelCase_ : List[str] = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4} lowerCAmelCase_ : Optional[Any] = get_size_dict(_snake_case , default_to_square=_snake_case ) lowerCAmelCase_ : Optional[int] = do_resize lowerCAmelCase_ : str = size lowerCAmelCase_ : Optional[Any] = resample lowerCAmelCase_ : Optional[Any] = do_rescale lowerCAmelCase_ : Union[str, Any] = rescale_factor lowerCAmelCase_ : Dict = do_normalize lowerCAmelCase_ : List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase_ : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ): lowerCAmelCase_ : List[str] = get_size_dict(_snake_case , default_to_square=_snake_case ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}" ) lowerCAmelCase_ : Tuple = (size['''height'''], size['''width''']) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ): return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ): return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Dict , ): lowerCAmelCase_ : List[str] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Tuple = resample if resample is not None else self.resample lowerCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : Tuple = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : str = image_std if image_std is not None else self.image_std lowerCAmelCase_ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase_ : List[Any] = size if size is not None else self.size lowerCAmelCase_ : str = get_size_dict(_snake_case , default_to_square=_snake_case ) lowerCAmelCase_ : Union[str, Any] = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase_ : List[str] = [convert_to_rgb(_snake_case ) for image in images] # All transformations expect numpy arrays. 
lowerCAmelCase_ : List[Any] = [to_numpy_array(_snake_case ) for image in images] if do_resize: lowerCAmelCase_ : Optional[Any] = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images] if do_rescale: lowerCAmelCase_ : Tuple = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: lowerCAmelCase_ : Dict = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] lowerCAmelCase_ : List[str] = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] lowerCAmelCase_ : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=_snake_case ) return encoded_outputs
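Judging by the defaults in this file (384x384 resizing, OPENAI_CLIP mean/std, RGB conversion), this appears to be a BLIP-style image processor. A usage sketch under that assumption, going through the public `AutoImageProcessor` entry point rather than the obfuscated class name; the checkpoint and image path are placeholders:

# Sketch: preparing pixel values with a BLIP-style image processor (assumed checkpoint).
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open("cat.png")  # placeholder local image

batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384) with the defaults above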
224
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" debug_launcher(test_ops.main )
16
0
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase__ : int = logging.get_logger(__name__) def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[Any] ) -> Tuple[int, int]: def constraint_to_multiple_of(__snake_case : List[str] , __snake_case : int , __snake_case : Union[str, Any]=0 , __snake_case : Dict=None ): __A : Dict = round(val / multiple ) * multiple if max_val is not None and x > max_val: __A : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: __A : int = math.ceil(val / multiple ) * multiple return x __A : Dict = (output_size, output_size) if isinstance(__lowerCamelCase , __lowerCamelCase ) else output_size __A : str = get_image_size(__lowerCamelCase ) __A : str = output_size # determine new height and width __A : str = output_height / input_height __A : Any = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __A : List[Any] = scale_width else: # fit height __A : int = scale_height __A : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__lowerCamelCase ) __A : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=__lowerCamelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE (A_ ): lowerCAmelCase = ["pixel_values"] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = False , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(**_snake_case) __A : List[str] = size if size is not None else {'''height''': 384, '''width''': 384} __A : Dict = get_size_dict(_snake_case) __A : str = do_resize __A : List[Any] = size __A : Optional[Any] = keep_aspect_ratio __A : Optional[int] = ensure_multiple_of __A : Dict = resample __A : Union[str, Any] = do_rescale __A : Optional[int] = rescale_factor __A : Any = do_normalize __A : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __A : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = 1 , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' __A : str = get_size_dict(_snake_case) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}') __A : List[str] = get_resize_output_image_size( _snake_case , output_size=(size['height'], size['width']) , keep_aspect_ratio=_snake_case , multiple=_snake_case , ) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ): '''simple docstring''' __A : Tuple = do_resize if do_resize is not None else self.do_resize __A : Dict = size if size is not None else self.size __A : Dict = get_size_dict(_snake_case) __A : Union[str, Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __A : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __A : List[Any] = resample if resample is not None else self.resample __A : List[str] = do_rescale if do_rescale is not None else self.do_rescale __A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __A : List[str] = do_normalize if do_normalize is not None else self.do_normalize __A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean __A : Tuple = image_std if image_std is not None else self.image_std __A : int = make_list_of_images(_snake_case) if not valid_images(_snake_case): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. 
__A : Tuple = [to_numpy_array(_snake_case) for image in images] if do_resize: __A : Dict = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case) for image in images] if do_rescale: __A : str = [self.rescale(image=_snake_case , scale=_snake_case) for image in images] if do_normalize: __A : str = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case) for image in images] __A : Union[str, Any] = [to_channel_dimension_format(_snake_case , _snake_case) for image in images] __A : Any = {'''pixel_values''': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' __A : List[str] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_snake_case) != len(_snake_case): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits') if is_torch_tensor(_snake_case): __A : Optional[int] = target_sizes.numpy() __A : Tuple = [] for idx in range(len(_snake_case)): __A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=_snake_case) __A : Any = resized_logits[0].argmax(dim=0) semantic_segmentation.append(_snake_case) else: __A : Optional[Any] = logits.argmax(dim=1) __A : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
190
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A = { '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''], '''tokenization_cpmant''': ['''CpmAntTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CpmAntForCausalLM''', '''CpmAntModel''', '''CpmAntPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
160
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
16
0
import sys from pathlib import Path _lowerCamelCase : List[Any] = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) _lowerCamelCase : Union[str, Any] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} _lowerCamelCase : str = "zero2" _lowerCamelCase : Any = "zero3" _lowerCamelCase : Union[str, Any] = [ZEROa, ZEROa] def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : str ) -> Union[str, Any]: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param UpperCAmelCase : Optional[Any] = parameterized.to_safe_name('''_'''.join(str(__lowerCamelCase ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test _lowerCamelCase : str = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __UpperCAmelCase ( A_ ): @parameterized.expand(_snake_case, name_func=_snake_case ) def __magic_name__ ( self : str, __A : Tuple, __A : str ): self.run_and_check( stage=_snake_case, model=_snake_case, distributed=_snake_case, fpaa=_snake_case, ) @require_torch_multi_gpu @parameterized.expand(_snake_case, name_func=_snake_case ) def __magic_name__ ( self : int, __A : Optional[int], __A : Dict ): self.run_and_check( stage=_snake_case, model=_snake_case, distributed=_snake_case, fpaa=_snake_case, ) @parameterized.expand(_snake_case, name_func=_snake_case ) def __magic_name__ ( self : int, __A : str, __A : List[Any] ): self.run_and_check( stage=_snake_case, model=_snake_case, distributed=_snake_case, fpaa=_snake_case, ) @require_torch_multi_gpu @parameterized.expand(_snake_case, name_func=_snake_case ) def __magic_name__ ( self : List[str], __A : str, __A : str ): self.run_and_check( stage=_snake_case, model=_snake_case, distributed=_snake_case, fpaa=_snake_case, ) def __magic_name__ ( self : List[Any], __A : Any ): pass def __magic_name__ ( self : Tuple, __A : str, __A : str, __A : int = 1_0, __A : bool = True, __A : bool = True, __A : bool = True, ): UpperCAmelCase : Optional[int] = models[model] UpperCAmelCase : Optional[int] = self.run_trainer( stage=_snake_case, model_name=_snake_case, eval_steps=_snake_case, num_train_epochs=1, distributed=_snake_case, fpaa=_snake_case, ) self.do_checks(_snake_case ) return output_dir def __magic_name__ ( self : Any, __A : str, __A : str, __A : int = 1_0, __A : int = 1, __A : bool = True, __A : bool = True, ): UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir('''./xxx''', after=_snake_case ) UpperCAmelCase : Union[str, Any] = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation 
--validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(_snake_case )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files UpperCAmelCase : Any = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() UpperCAmelCase : Optional[int] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] UpperCAmelCase : int = self.get_launcher(_snake_case ) UpperCAmelCase : List[str] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_snake_case, env=self.get_env() ) return output_dir def __magic_name__ ( self : Optional[Any], __A : List[str]=False ): UpperCAmelCase : Optional[int] = min(2, get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
336
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
16
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __A = logging.get_logger(__name__) # TODO: upload to AWS __A = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class snake_case ( A_ ): SCREAMING_SNAKE_CASE_ : str = "retribert" def __init__( self : Optional[Any] , UpperCamelCase__ : int=3_0_5_2_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : List[Any]=8 , UpperCamelCase__ : int=1_2 , UpperCamelCase__ : Union[str, Any]=3_0_7_2 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=5_1_2 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : str=1e-12 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=1_2_8 , UpperCamelCase__ : str=0 , **UpperCamelCase__ : Union[str, Any] , )-> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=_snake_case , **_snake_case) __lowerCAmelCase: List[str] = vocab_size __lowerCAmelCase: Optional[Any] = hidden_size __lowerCAmelCase: int = num_hidden_layers __lowerCAmelCase: int = num_attention_heads __lowerCAmelCase: List[Any] = hidden_act __lowerCAmelCase: Dict = intermediate_size __lowerCAmelCase: Tuple = hidden_dropout_prob __lowerCAmelCase: int = attention_probs_dropout_prob __lowerCAmelCase: Optional[Any] = max_position_embeddings __lowerCAmelCase: List[Any] = type_vocab_size __lowerCAmelCase: Optional[Any] = initializer_range __lowerCAmelCase: List[str] = layer_norm_eps __lowerCAmelCase: int = share_encoders __lowerCAmelCase: Dict = projection_dim
217
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
16
0
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ): '''simple docstring''' __a : Dict = parent __a : Any = batch_size __a : Union[str, Any] = image_size __a : Dict = patch_size __a : int = num_channels __a : Any = embed_dim __a : int = depths __a : Dict = num_heads __a : List[Any] = window_size __a : int = mlp_ratio __a : Optional[int] = qkv_bias __a : str = hidden_dropout_prob __a : List[Any] = attention_probs_dropout_prob __a : Dict = drop_path_rate __a : int = hidden_act __a : Tuple = use_absolute_embeddings __a : Tuple = patch_norm __a : Tuple = layer_norm_eps __a : Optional[Any] = initializer_range __a : int = is_training __a : Optional[int] = scope __a : str = use_labels __a : Dict = type_sequence_label_size __a : Union[str, Any] = encoder_stride def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Optional[Any] = None if self.use_labels: __a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() __a : str = model(_snake_case ) __a : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __a : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) 
model.eval() __a : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __a : Optional[int] = 1 __a : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() __a : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __a : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Tuple = self.type_sequence_label_size __a : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() __a : str = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() __a : Union[str, Any] = config_and_inputs __a : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( A_ , A_ , unittest.TestCase ): A_ = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A_ = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = SwinvaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=_snake_case , embed_dim=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' 
) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : str = model_class(_snake_case ) __a : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __a : Tuple = True for model_class in self.all_model_classes: __a : Optional[int] = True __a : str = False __a : Union[str, Any] = True __a : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __a : str = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __a : Dict = outputs.attentions __a : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) , _snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __a : List[Any] = True __a : Optional[Any] = config.window_size**2 __a : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __a : List[str] = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __a : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __a : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine __a : Optional[int] = True __a : Tuple = True __a : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __a : Optional[Any] = model(**self._prepare_for_class(_snake_case , _snake_case ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): __a : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __a : List[str] = 2 self.assertEqual(out_len + added_hidden_states , len(_snake_case ) ) __a : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __UpperCAmelCase ( self , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __a : int = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __a : Optional[int] = outputs.hidden_states __a : List[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) , _snake_case ) # Swinv2 has a different seq_length __a : Dict = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __a : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __a : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) , _snake_case ) __a : List[str] = reshaped_hidden_states[0].shape __a : int = ( reshaped_hidden_states[0].view(_snake_case , _snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __a : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __a : List[str] = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : str = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = 3 __a : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __a : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __a : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __a : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __a : str = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Dict = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.model_tester.prepare_config_and_inputs_for_common() __a : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: __a : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision 
@require_torch class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( _snake_case ) __a : Union[str, Any] = self.default_image_processor __a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) __a : Dict = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): __a : Optional[Any] = model(**_snake_case ) # verify the logits __a : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _snake_case ) __a : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
27
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]: """simple docstring""" lowercase__ : Dict = parent lowercase__ : Any = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : int = num_channels lowercase__ : Any = embed_dim lowercase__ : int = depths lowercase__ : Dict = num_heads lowercase__ : List[Any] = window_size lowercase__ : int = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = drop_path_rate lowercase__ : int = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : Tuple = patch_norm lowercase__ : Tuple = layer_norm_eps lowercase__ : Optional[Any] = initializer_range lowercase__ : int = is_training lowercase__ : Optional[int] = scope lowercase__ : str = use_labels lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = encoder_stride def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps 
,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Optional[int] = 1 lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.type_sequence_label_size lowercase__ : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCAmelCase : Optional[int] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : Dict = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[Any] = SwinvaModelTester(self ) lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = True for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Dict = outputs.attentions lowercase__ : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) ,_snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any] = True lowercase__ : Optional[Any] = config.window_size**2 lowercase__ : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowercase__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : Tuple = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) if hasattr(self.model_tester ,'''num_hidden_states_types''' ): lowercase__ : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowercase__ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) ) lowercase__ : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCAmelCase ( 
self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # Swinv2 has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape lowercase__ : int = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Any ) -> List[str]: """simple docstring""" lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
16
0
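Both Swinv2 test variants in the row above assert the final hidden-state shape with the same Swin-style arithmetic: each of the `len(depths) - 1` patch-merging steps quarters the token count and doubles the channel width. A standalone check of that arithmetic, using the tester's default values:

# Shape arithmetic from the test: 32x32 image, patch size 2, embed_dim 16,
# depths [1, 2, 1] -> two merging steps after the initial patch embedding.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

num_patches = (image_size // patch_size) ** 2                # 16 * 16 = 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))   # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)            # 16 * 4 = 64

assert (expected_seq_len, expected_dim) == (16, 64)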
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time A__ : List[Any] =Lock() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__lowerCamelCase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() _lowerCAmelCase = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left _lowerCAmelCase = min(__lowerCamelCase , __lowerCamelCase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__lowerCamelCase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() _lowerCAmelCase = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right _lowerCAmelCase = max(__lowerCamelCase , __lowerCamelCase ) # after all swaps are performed, send the values back to main result_pipe[1].send(__lowerCamelCase ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop _lowerCAmelCase = Pipe() _lowerCAmelCase = Pipe() process_array_.append( Process( target=__lowerCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) _lowerCAmelCase = temp_rs _lowerCAmelCase = temp_rr for i in range(1 , len(__lowerCamelCase ) - 1 ): _lowerCAmelCase = Pipe() _lowerCAmelCase = Pipe() process_array_.append( Process( target=__lowerCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) _lowerCAmelCase = temp_rs _lowerCAmelCase = temp_rr process_array_.append( Process( target=__lowerCamelCase , args=( len(__lowerCamelCase ) - 1, arr[len(__lowerCamelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__lowerCamelCase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__lowerCamelCase ) ): _lowerCAmelCase = result_pipe[p][0].recv() process_array_[p].join() return arr def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = list(range(10 , 0 , -1 ) ) print("""Initial List""" ) print(*__lowerCamelCase ) _lowerCAmelCase = odd_even_transposition(__lowerCamelCase ) print("""Sorted List\n""" ) print(*__lowerCamelCase ) if __name__ == "__main__": main()
70
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
16
0
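The multiprocessing sample in this row parallelizes odd-even transposition sort with one process per element and pipes between neighbors. The underlying algorithm is easier to see in a single-process reference version, sketched here: n phases that alternate between even- and odd-indexed neighbor pairs are guaranteed to sort the list.

# Single-process reference for odd-even transposition sort.
def odd_even_transposition(arr):
    n = len(arr)
    for phase in range(n):
        # Even phases compare pairs (0,1),(2,3),...; odd phases (1,2),(3,4),...
        start = phase % 2
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))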
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', 
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( 
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') ) return token def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def a__ ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : Optional[Any] = 1_000 __SCREAMING_SNAKE_CASE : Optional[Any] = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Dict = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : Optional[Any] = idalabel __SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __SCREAMING_SNAKE_CASE : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __SCREAMING_SNAKE_CASE : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __SCREAMING_SNAKE_CASE : List[Any] = [2, 2, 20] __SCREAMING_SNAKE_CASE : Any = [3, 12, 16] __SCREAMING_SNAKE_CASE : Tuple = [192, 768, 1_024] __SCREAMING_SNAKE_CASE : List[Any] = CvtForImageClassification(__lowerCamelCase ) __SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) ) __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __SCREAMING_SNAKE_CASE : Any = list_of_state_dict + cls_token(__lowerCamelCase ) __SCREAMING_SNAKE_CASE : Any = list_of_state_dict + embeddings(__lowerCamelCase ) for cnt in range(config.depth[idx] ): __SCREAMING_SNAKE_CASE : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase ) __SCREAMING_SNAKE_CASE : List[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = original_weights[list_of_state_dict[i][1]] 
model.load_state_dict(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you\'d like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
303
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase_ = { 'facebook/xglm-564M': 2_048, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None: """simple docstring""" lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Optional[int] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_snake_case ) lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Optional[int]: """simple docstring""" lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Optional[int] = None lowercase__ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any: """simple docstring""" lowercase__ : int = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : Dict = {} lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" 
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip() return out_string def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
16
0
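The CvT conversion script in this row is, at its core, a key-rename pass over a checkpoint: it accumulates (new_name, old_name) pairs and then copies each tensor from the original state dict into an OrderedDict under the new name. A minimal sketch of that pattern, with a toy one-entry mapping taken from the stage-0 embedding keys in the sample:

from collections import OrderedDict

def rename_state_dict(old_state, rename_pairs):
    # Copy each tensor across under its new key, preserving insertion order.
    new_state = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_state[new_key] = old_state[old_key]
    return new_state

old = {"stage0.patch_embed.proj.weight": 1.0}
pairs = [("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
          "stage0.patch_embed.proj.weight")]
print(rename_state_dict(old, pairs))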
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowerCAmelCase = logging.get_logger(__name__) class A_ ( A_ ): """simple docstring""" def __init__( self :Optional[int] , **lowerCamelCase_ :List[str] ): """simple docstring""" requires_backends(self , ['bs4'] ) super().__init__(**_snake_case ) def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ): """simple docstring""" lowerCamelCase__ : Optional[int] =[] lowerCamelCase__ : int =[] lowerCamelCase__ : Optional[int] =element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag lowerCamelCase__ : Union[str, Any] =parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) lowerCamelCase__ : List[str] =parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :Any ): """simple docstring""" lowerCamelCase__ : str =BeautifulSoup(_snake_case , 'html.parser' ) lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Tuple =[] lowerCamelCase__ : str =[] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue lowerCamelCase__ : Tuple =html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) lowerCamelCase__ : List[str] =self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('Number of doc strings and xtags does not correspond' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('Number of doc strings and xsubs does not correspond' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :int ): """simple docstring""" lowerCamelCase__ : int ='''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self :int , lowerCamelCase_ :Optional[int] ): """simple docstring""" lowerCamelCase__ : List[str] =False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): lowerCamelCase__ : List[Any] =True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): lowerCamelCase__ : Union[str, Any] =True if not valid_strings: raise ValueError( 'HTML strings must of type `str`, `List[str]` (batch of examples), ' f"""but is of type {type(_snake_case )}.""" ) lowerCamelCase__ : Union[str, Any] =bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: lowerCamelCase__ : Optional[Any] =[html_strings] # Get nodes + xpaths lowerCamelCase__ : int =[] lowerCamelCase__ : Optional[int] =[] for html_string in html_strings: lowerCamelCase__ : List[Any] =self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) lowerCamelCase__ : int =[] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): lowerCamelCase__ : Optional[Any] =self.construct_xpath(_snake_case , _snake_case ) 
xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict lowerCamelCase__ : str ={'''nodes''': nodes, '''xpaths''': xpaths} lowerCamelCase__ : List[Any] =BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
126
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]: print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase ) else: lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase ) if hidden_sizes == 1_92: lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase ) if hidden_sizes == 2_56: lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase ) if hidden_sizes == 3_84: lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase ) from_model.eval() lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval() lowercase__ : str = OrderedDict() lowercase__ : int = from_model.state_dict() lowercase__ : Dict = list(from_model.state_dict().keys() ) lowercase__ : Any = list(our_model.state_dict().keys() ) print(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for i in range(len(__lowerCamelCase ) ): lowercase__ : str = weights[og_keys[i]] our_model.load_state_dict(__lowerCamelCase ) lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) ) lowercase__ : Optional[int] = from_model(__lowerCamelCase ) lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." 
lowercase__ : Any = name print(__lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase__ : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]: lowercase__ : Any = '''imagenet-1k-id2label.json''' lowercase__ : Tuple = 10_00 lowercase__ : Dict = (1, num_labels) lowercase__ : List[str] = '''huggingface/label-files''' lowercase__ : str = num_labels lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Union[str, Any] = idalabel lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) lowercase__ : Tuple = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } lowercase__ : Any = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
16
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class UpperCAmelCase ( A_ ): A__ : List[Any] = "poolformer" def __init__(self : Union[str, Any] , snake_case__ : Dict=3 , snake_case__ : Dict=16 , snake_case__ : Dict=16 , snake_case__ : List[Any]=3 , snake_case__ : str=4.0 , snake_case__ : str=[2, 2, 6, 2] , snake_case__ : Any=[64, 1_28, 3_20, 5_12] , snake_case__ : Any=[7, 3, 3, 3] , snake_case__ : Any=[4, 2, 2, 2] , snake_case__ : List[str]=[2, 1, 1, 1] , snake_case__ : Dict=4 , snake_case__ : List[str]=0.0 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=True , snake_case__ : List[str]=1e-5 , snake_case__ : Any=0.02 , **snake_case__ : Tuple , ) -> int: '''simple docstring''' snake_case : Union[str, Any] = num_channels snake_case : str = patch_size snake_case : Union[str, Any] = stride snake_case : Union[str, Any] = padding snake_case : Union[str, Any] = pool_size snake_case : Dict = hidden_sizes snake_case : Tuple = mlp_ratio snake_case : str = depths snake_case : List[Any] = patch_sizes snake_case : Dict = strides snake_case : Optional[int] = num_encoder_blocks snake_case : List[str] = drop_path_rate snake_case : str = hidden_act snake_case : str = use_layer_scale snake_case : List[Any] = layer_scale_init_value snake_case : List[Any] = initializer_range super().__init__(**_snake_case ) class UpperCAmelCase ( A_ ): A__ : List[str] = version.parse("1.11" ) @property def _SCREAMING_SNAKE_CASE (self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _SCREAMING_SNAKE_CASE (self : List[str] ) -> float: '''simple docstring''' return 2e-3
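A short sketch of how this configuration is typically consumed, assuming it corresponds to transformers' PoolFormerConfig (the class names below are that assumption):

from transformers import PoolFormerConfig, PoolFormerModel

config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
model = PoolFormerModel(config)  # randomly initialized weights drawn from the config
print(config.num_encoder_blocks)  # 4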
59
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __A : '''simple docstring''' lowerCAmelCase : List[str] lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ ) def __call__( self : List[str] ) -> Any: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __A : '''simple docstring''' lowerCAmelCase : Optional[List] = None lowerCAmelCase : Optional[int] = None lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "dict" lowerCAmelCase : ClassVar[Any] = None lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None lowercase__ : Dict = len(self.languages ) if self.languages else None def __call__( self : List[Any] ) -> List[Any]: """simple docstring""" return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int: """simple docstring""" lowercase__ : List[Any] = set(self.languages ) if self.languages and set(_snake_case ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowercase__ : str = [] for lang, text in translation_dict.items(): if isinstance(_snake_case ,_snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) ) return {"language": languages, "translation": translations} def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
16
0
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
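These aliases are meant to be consumed as type hints elsewhere in the library; a hypothetical example:

def read_files(paths: NestedDataStructureLike[PathLike]) -> None:
    """Accept a single path, a list/tuple of paths, or a dict of named paths."""
    ...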
62
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]: lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase__ : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : str = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Optional[int] = 16 elif accelerator.mixed_precision != "no": lowercase__ : List[Any] = 8 else: lowercase__ : int = None return tokenizer.pad( __lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) lowercase__ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1": lowercase__ : List[Any] = 2 # Initialize accelerator lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : str = config['''lr'''] lowercase__ : str = int(config['''num_epochs'''] ) lowercase__ : Optional[int] = int(config['''seed'''] ) lowercase__ : Tuple = int(config['''batch_size'''] ) lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__lowerCamelCase ) def inner_training_loop(__lowerCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Tuple = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase ) lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Now we train the model for epoch in range(__lowerCamelCase ): model.train() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ : Dict = model(**__lowerCamelCase ) lowercase__ : List[Any] = outputs.loss accelerator.backward(__lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Tuple = model(**__lowerCamelCase ) lowercase__ : Any = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCamelCase , references=__lowerCamelCase , ) lowercase__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ) -> Dict: lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase__ : int = parser.parse_args() lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
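The decorator used above can also be sketched in isolation: accelerate's find_executable_batch_size re-runs the wrapped function with a halved batch size whenever it hits an out-of-memory error, passing the current batch size as the first argument. The body here is a placeholder:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    ...  # an OOM raised here triggers a retry at batch_size // 2

train()  # called with no arguments; the decorator supplies batch_size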
16
0
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ ( A_, unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = XLMTokenizer _SCREAMING_SNAKE_CASE = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ : Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowerCAmelCase_ : Tuple = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) lowerCAmelCase_ : Any = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowerCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(_snake_case ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(_snake_case ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): lowerCAmelCase_ : List[str] = '''lower newer''' lowerCAmelCase_ : Optional[int] = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : str ): lowerCAmelCase_ : str = XLMTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ : Tuple = '''lower''' lowerCAmelCase_ : List[Any] = ['''low''', '''er</w>'''] lowerCAmelCase_ : str = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) lowerCAmelCase_ : Optional[int] = tokens + ['''<unk>'''] lowerCAmelCase_ : Optional[int] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : Optional[int] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' ) lowerCAmelCase_ : str = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) lowerCAmelCase_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) lowerCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(_snake_case ) lowerCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
224
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __UpperCAmelCase ( __lowerCamelCase ) -> Any: lowercase__ : Optional[int] = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : str = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple: lowercase__ : List[str] = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def __UpperCAmelCase ( ) -> Optional[int]: lowercase__ : List[str] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: lowercase__ : List[Any] = '''imagenet-1k-id2label.json''' lowercase__ : Optional[Any] = 10_00 lowercase__ : Optional[Any] = '''huggingface/label-files''' lowercase__ : Dict = num_labels lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : str = {v: k for k, v in idalabel.items()} lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": lowercase__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": lowercase__ : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : List[Any] = [2, 2, 20] lowercase__ : Any = [3, 12, 16] lowercase__ : Tuple = [1_92, 7_68, 10_24] lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase ) lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) lowercase__ : List[str] = image_size lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) ) lowercase__ : int = OrderedDict() lowercase__ : List[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase ) lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase ) for cnt in range(config.depth[idx] ): lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase ) lowercase__ : List[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = 
argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
16
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
190
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str: if not isinstance(__lowerCamelCase , __lowerCamelCase ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowercase__ : Tuple = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
16
0
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __A ( ) -> str: __a : Tuple = HfArgumentParser(__lowerCamelCase) __a : Optional[Any] = parser.parse_args_into_dataclasses()[0] __a : int = TensorFlowBenchmark(args=__lowerCamelCase) try: __a : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: __a : Optional[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' __a : List[str] = ''' '''.join(str(__lowerCamelCase).split(''' ''')[:-1]) __a : Tuple = '''''' __a : List[Any] = eval(str(__lowerCamelCase).split(''' ''')[-1]) __a : Any = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:]) else: wrong_args.append(__lowerCamelCase) if len(__lowerCamelCase) > 0: __a : Optional[int] = full_error_msg + begin_error_msg + str(__lowerCamelCase) raise ValueError(__lowerCamelCase) benchmark.run() if __name__ == "__main__": main()
160
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str: """simple docstring""" lowercase__ : int = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Optional[Any] = embeddings_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : str = depths lowercase__ : Tuple = is_training lowercase__ : List[Any] = use_labels lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Optional[Any] = len(_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) lowercase__ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = TFResNetModel(config=_snake_case ) lowercase__ : List[str] = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.num_labels lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case ) lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : Dict = {'''pixel_values''': 
pixel_values} return config, inputs_dict @require_tf class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : List[Any] = False lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : List[str] = False def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Optional[Any] = TFResNetModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[int] = [*signature.parameters.keys()] lowercase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ): lowercase__ : str = model_class(_snake_case ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : List[Any] = layer_type lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Any: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : Any = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' ) # forward pass lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
16
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
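The _LazyModule indirection above means the heavy torch-backed submodule is only imported when one of its attributes is first accessed; from user code the import looks ordinary:

from transformers import XLMRobertaXLForMaskedLM  # resolved lazily on first access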
336
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: if "model" in orig_key: lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase__ : str = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase__ : Optional[Any] = '''yoso.''' + orig_key return orig_key def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: for key in orig_state_dict.copy().keys(): lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase__ : Tuple = val lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase ) lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase ) lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) lowerCAmelCase_ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
16
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): @slow def lowercase_ ( self : int)-> Any: '''simple docstring''' __lowerCAmelCase: Tuple = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_snake_case).to(_snake_case) __lowerCAmelCase: int = AutoTokenizer.from_pretrained("google/mt5-small") __lowerCAmelCase: Optional[Any] = tokenizer("Hello there" , return_tensors="pt").input_ids __lowerCAmelCase: List[Any] = tokenizer("Hi I am" , return_tensors="pt").input_ids __lowerCAmelCase: Optional[Any] = model(input_ids.to(_snake_case) , labels=labels.to(_snake_case)).loss __lowerCAmelCase: Tuple = -(labels.shape[-1] * loss.item()) __lowerCAmelCase: List[str] = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
217
"""simple docstring""" import os def __UpperCAmelCase ( ) -> int: with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowercase__ : List[Any] = str(file.readlines()[0] ) lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowercase__ : int = 0 lowercase__ : Optional[Any] = 0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowercase__ : List[str] = 0 return total_score if __name__ == "__main__": print(solution())
16
0
'''simple docstring''' import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ): '''simple docstring''' __a : Optional[int] = parent __a : str = batch_size __a : Any = seq_length __a : Union[str, Any] = is_training __a : Tuple = use_attention_mask __a : Tuple = use_token_type_ids __a : Optional[Any] = use_labels __a : Tuple = vocab_size __a : Any = hidden_size __a : Optional[Any] = num_hidden_layers __a : Dict = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Tuple = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : Union[str, Any] = type_vocab_size __a : Tuple = type_sequence_label_size __a : Optional[int] = initializer_range __a : str = num_choices def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Optional[int] = None if self.use_attention_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_snake_case , ) return config, input_ids, attention_mask def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.prepare_config_and_inputs() __a : List[str] = config_and_inputs __a : Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __UpperCamelCase ( A_ , unittest.TestCase ): A_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = FlaxDistilBertModelTester(self ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __a : Union[str, Any] = model_class_name.from_pretrained('distilbert-base-uncased' ) __a : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(_snake_case ) @require_flax class __UpperCamelCase ( unittest.TestCase ): @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) __a : str = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __a : Optional[Any] = np.array([[0, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __a : Optional[Any] = model(_snake_case , attention_mask=_snake_case )[0] __a : Union[str, Any] = (1, 11, 768) self.assertEqual(output.shape , _snake_case ) __a : Tuple = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )
27
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(A_ ) class __A ( A_ ): '''simple docstring''' def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]: """simple docstring""" super().__init__(**_snake_case ) requires_backends(self ,'''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]: """simple docstring""" return super().__call__(_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = {} if "candidate_labels" in kwargs: lowercase__ : Any = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase__ : Optional[Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]: """simple docstring""" lowercase__ : List[Any] = load_image(_snake_case ) lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework ) lowercase__ : str = candidate_labels lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels] lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case ) lowercase__ : Optional[int] = [text_inputs] return inputs def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' ) lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] ,_snake_case ): lowercase__ : List[str] = text_inputs[0] else: # Batching case. 
lowercase__ : int = text_inputs[0][0] lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case ) lowercase__ : Union[str, Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Dict = model_outputs.pop('''candidate_labels''' ) lowercase__ : Optional[Any] = model_outputs['''logits'''][0] if self.framework == "pt": lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) lowercase__ : Tuple = probs.tolist() if not isinstance(_snake_case ,_snake_case ): lowercase__ : Any = [scores] elif self.framework == "tf": lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 ) lowercase__ : Optional[Any] = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) lowercase__ : Union[str, Any] = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda _snake_case : -x[0] ) ] return result
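A minimal end-to-end sketch of this pipeline; the checkpoint and image URL are illustrative choices:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "a plane"],
)
print(result)  # [{'score': ..., 'label': 'two cats'}, ...], sorted by descending score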
16
0
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Change the contrast of a PIL image by remapping each channel value."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
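For level = 170 the factor works out to 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695, roughly 4.85, so pixel value 128 is a fixed point and other values are pushed away from it:

factor = (259 * (170 + 255)) / (255 * (259 - 170))
print(round(factor, 2))                 # 4.85
print(int(128 + factor * (150 - 128)))  # 234: a value 22 above mid-gray moves ~107 above it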
70
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): lowercase__ : List[str] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__lowerCamelCase ): # looping through rows of graph array for i in range(__lowerCamelCase ): # looping through columns of graph array for j in range(__lowerCamelCase ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowercase__ : str = dist[i][k] + dist[k][j] _print_dist(__lowerCamelCase , __lowerCamelCase ) return dist, v if __name__ == "__main__": lowerCAmelCase_ = int(input('Enter number of vertices: ')) lowerCAmelCase_ = int(input('Enter number of edges: ')) lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowerCAmelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowerCAmelCase_ = int(input('Enter source:')) lowerCAmelCase_ = int(input('Enter destination:')) lowerCAmelCase_ = float(input('Enter weight:')) lowerCAmelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
16
0
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at ``(row, column)`` without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
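Quick sanity checks (a sketch) against the unsolved starting grid above, before the backtracking mutates it:

assert not is_safe(initial_grid, 4, 1, 9)  # 9 already appears in row 4
assert not is_safe(initial_grid, 1, 1, 3)  # 3 already in column 1 (and the top-left box)
assert is_safe(initial_grid, 0, 1, 1)      # 1 is free for cell (0, 1)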
303
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
16
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) lowerCAmelCase = dataset.iloc[:, 1:2].values lowerCAmelCase = dataset.iloc[:, 2].values lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase = PolynomialFeatures(degree=4) lowerCAmelCase = poly_reg.fit_transform(X) lowerCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def lowerCAmelCase_ ( ) ->Tuple: plt.scatter(__lowerCamelCase , __lowerCamelCase , color='red' ) plt.plot(__lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(__lowerCamelCase ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
126
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
16
0
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = """The Nymphenburg Palace is a beautiful palace in Munich!""" def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): snake_case : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case : Optional[Any] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case : Tuple = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , __lowerCamelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case : List[str] = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case : Union[str, Any] = os.path.join(get_home_dir() , "models" ) snake_case : List[str] = _load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase ) snake_case : str = nlp.model.BERTModel( __lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , ) original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase ) snake_case : List[Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case : Optional[int] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': 
predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(__lowerCamelCase ), } snake_case : str = BertConfig.from_dict(__lowerCamelCase ) snake_case : str = BertForMaskedLM(__lowerCamelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__lowerCamelCase : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__lowerCamelCase : str , __lowerCamelCase : Tuple ): snake_case : str = hf_param.shape snake_case : int = to_torch(params[gluon_param] ) snake_case : Optional[Any] = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param snake_case : str = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) snake_case : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) snake_case : int = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) snake_case : Optional[int] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case : int = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case : BertSelfAttention = layer.attention.self snake_case : Any = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) snake_case : int = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) snake_case : Optional[int] = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) snake_case : int = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) snake_case : Optional[int] = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) snake_case : Optional[Any] = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output snake_case : BertSelfOutput = layer.attention.output snake_case : Union[str, Any] = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) snake_case : List[str] = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) snake_case : List[str] = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) snake_case : Any = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate snake_case : BertIntermediate = layer.intermediate snake_case : Dict = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) snake_case : Union[str, Any] = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output snake_case : BertOutput = layer.output snake_case : str = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) snake_case : Any = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) snake_case : int = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) snake_case : Any = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case : str = RobertaTokenizer.from_pretrained("roberta-base" ) snake_case : Tuple = tokenizer.encode_plus(__lowerCamelCase )['''input_ids'''] # Get gluon output snake_case : List[str] = mx.nd.array([input_ids] ) snake_case : Union[str, Any] = original_bort(inputs=__lowerCamelCase , token_types=[] ) # Get 
Transformer output (save and reload model again) hf_bort_model.save_pretrained(__lowerCamelCase ) snake_case : List[str] = BertModel.from_pretrained(__lowerCamelCase ) hf_bort_model.eval() snake_case : Tuple = tokenizer.encode_plus(__lowerCamelCase , return_tensors="pt" ) snake_case : Dict = hf_bort_model(**__lowerCamelCase )[0] snake_case : List[str] = output_gluon[0].asnumpy() snake_case : Optional[Any] = output_hf[0].detach().numpy() snake_case : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case : List[str] = np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , __lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __lowerCamelCase = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
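The verification step at the end of the conversion script boils down to an element-wise tolerance check. A self-contained sketch, with toy arrays standing in for the two models' outputs:

import numpy as np

gluon_layer = np.array([0.1, -0.2, 0.3], dtype=np.float32)
hf_layer = gluon_layer + 5e-4  # simulate small cross-framework numerical drift

max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
print(f"max abs diff: {max_absolute_diff:.2e}, within atol=1e-3: {success}")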
59
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[Any] = 
AutoModelForCausalLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case ) lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case ) lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( _snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ : List[Any] = 
AutoConfig.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 ) lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case ) self.assertIsInstance(_snake_case ,_snake_case ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
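A condensed sketch of the cross-framework round trip these tests exercise; it downloads real checkpoints, so network access is assumed:

from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # TF model from PT weights
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # PT model from TF weights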
16
0
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
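A standalone sanity sketch that a key pair of this shape round-trips; it uses tiny textbook primes instead of rabinMiller output so it runs instantly (Python 3.8+ for pow(e, -1, phi)):

p, q, e = 61, 53, 17
n = p * q
phi = (p - 1) * (q - 1)
d = pow(e, -1, phi)  # modular inverse of e, i.e. what find_mod_inverse computes

message = 42
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message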
62
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int: lowercase__ : int = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
16
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ViTImageProcessor if is_vision_available() else None @property def SCREAMING_SNAKE_CASE__ ( self : str ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : int = (3, 3_2, 1_2_8) lowerCAmelCase_ : List[str] = tempfile.mkdtemp() # fmt: off lowerCAmelCase_ : Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on lowerCAmelCase_ : Tuple = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) lowerCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) lowerCAmelCase_ : Optional[Any] = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 3_2, '''width''': 1_2_8}, } lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , _snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Any , **SCREAMING_SNAKE_CASE_ : Dict ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowerCAmelCase_ : Optional[Any] = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) lowerCAmelCase_ : Any = Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) return image_input def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[str] = self.get_tokenizer() lowerCAmelCase_ : Union[str, Any] = self.get_image_processor() lowerCAmelCase_ : int = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ : Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowerCAmelCase_ : Dict = self.get_tokenizer() lowerCAmelCase_ : Dict = self.get_image_processor() lowerCAmelCase_ : 
List[str] = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCAmelCase_ : List[str] = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 ) lowerCAmelCase_ : Dict = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : List[Any] = self.get_image_processor() lowerCAmelCase_ : Dict = self.get_tokenizer() lowerCAmelCase_ : List[Any] = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : Tuple = self.prepare_image_inputs() lowerCAmelCase_ : Any = image_processor(_snake_case , return_tensors='np' ) lowerCAmelCase_ : str = processor(images=_snake_case , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : List[str] = self.get_image_processor() lowerCAmelCase_ : Any = self.get_tokenizer() lowerCAmelCase_ : str = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : Tuple = '''test''' lowerCAmelCase_ : str = processor(text=_snake_case ) lowerCAmelCase_ : List[Any] = tokenizer(_snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowerCAmelCase_ : Any = self.get_image_processor() lowerCAmelCase_ : Optional[int] = self.get_tokenizer() lowerCAmelCase_ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : Union[str, Any] = '''test''' lowerCAmelCase_ : List[Any] = self.prepare_image_inputs() lowerCAmelCase_ : Tuple = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(_snake_case ): processor() def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowerCAmelCase_ : Optional[int] = self.get_image_processor() lowerCAmelCase_ : Dict = self.get_tokenizer() lowerCAmelCase_ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ : List[Any] = processor.char_decode(_snake_case ) lowerCAmelCase_ : Dict = tokenizer.batch_decode(_snake_case ) lowerCAmelCase_ : Optional[int] = [seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : int = self.get_image_processor() lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer() lowerCAmelCase_ : int = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : List[Any] = None lowerCAmelCase_ : List[Any] = self.prepare_image_inputs() lowerCAmelCase_ : Tuple = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) 
, processor.model_input_names ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : Optional[int] = self.get_image_processor() lowerCAmelCase_ : Tuple = self.get_tokenizer() lowerCAmelCase_ : Any = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case ) lowerCAmelCase_ : Tuple = torch.randn(1 , 2_7 , 3_8 ) lowerCAmelCase_ : int = torch.randn(1 , 2_7 , 5_0_2_5_7 ) lowerCAmelCase_ : List[Any] = torch.randn(1 , 2_7 , 3_0_5_2_2 ) lowerCAmelCase_ : Any = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
224
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" debug_launcher(test_script.main ) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" debug_launcher(test_ops.main )
16
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
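A minimal usage sketch for the config above via the public transformers API:

from transformers import Speech2Text2Config

config = Speech2Text2Config(decoder_layers=4)
print(config.model_type)   # speech_to_text_2
print(config.hidden_size)  # 256; attribute_map routes hidden_size to d_model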
190
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer A = logging.get_logger(__name__) A = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } A = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } A = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class __lowercase ( A_ ): '''simple docstring''' __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase = ["input_ids", "attention_mask"] __lowerCAmelCase = DistilBertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , 
**_snake_case , ) __a : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars ): __a : List[str] = getattr(_snake_case , normalizer_state.pop('''type''' ) ) __a : Union[str, Any] = do_lower_case __a : Dict = strip_accents __a : str = tokenize_chinese_chars __a : Optional[Any] = normalizer_class(**_snake_case ) __a : Any = do_lower_case def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ): __a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : Any = [self.sep_token_id] __a : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : Optional[int] = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
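A minimal usage sketch for the fast tokenizer above (downloads the vocab on first use):

from transformers import DistilBertTokenizerFast

tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
enc = tok("hello world")
print(enc["input_ids"])  # wrapped in [CLS] (101) ... [SEP] (102)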
160
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
16
0
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase ( A_ ): def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = tempfile.mkdtemp() UpperCAmelCase : Tuple = 8 # DPR tok UpperCAmelCase : Optional[int] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) os.makedirs(_snake_case, exist_ok=_snake_case ) UpperCAmelCase : Dict = os.path.join(_snake_case, DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok UpperCAmelCase : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase : Union[str, Any] = dict(zip(_snake_case, range(len(_snake_case ) ) ) ) UpperCAmelCase : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase : List[Any] = {'''unk_token''': '''<unk>'''} UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname, '''bart_tokenizer''' ) os.makedirs(_snake_case, exist_ok=_snake_case ) UpperCAmelCase : Optional[Any] = os.path.join(_snake_case, BART_VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase : Dict = os.path.join(_snake_case, BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(_snake_case ) + '''\n''' ) with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_snake_case ) ) def __magic_name__ ( self : Dict ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) ) def __magic_name__ ( self : Optional[Any] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) ) def __magic_name__ ( self : List[Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer''' ) ) def __magic_name__ ( self : Any ): shutil.rmtree(self.tmpdirname ) def __magic_name__ ( self : str ): UpperCAmelCase : Any = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', 
'''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __magic_name__ ( self : List[Any] ): UpperCAmelCase : int = self.get_dummy_dataset() UpperCAmelCase : Dict = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: UpperCAmelCase : str = dataset UpperCAmelCase : List[Any] = RagRetriever( _snake_case, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) return retriever def __magic_name__ ( self : int, __A : bool ): UpperCAmelCase : Optional[Any] = self.get_dummy_dataset() UpperCAmelCase : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''custom''', ) if from_disk: UpperCAmelCase : List[str] = os.path.join(self.tmpdirname, '''dataset''' ) UpperCAmelCase : Any = os.path.join(self.tmpdirname, '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname, '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset''' ) ) del dataset UpperCAmelCase : List[str] = RagRetriever( _snake_case, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: UpperCAmelCase : str = RagRetriever( _snake_case, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, _snake_case ), ) return retriever def __magic_name__ ( self : Dict ): UpperCAmelCase : List[Any] = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase : str = os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb''' ) ) UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''' ) UpperCAmelCase : Optional[Any] = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(_snake_case, open(_snake_case, '''wb''' ) ) UpperCAmelCase : Dict = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''legacy''', index_path=self.tmpdirname, ) UpperCAmelCase : List[str] = RagRetriever( _snake_case, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __magic_name__ ( self : Dict ): UpperCAmelCase : Dict = 1 UpperCAmelCase : Tuple = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa ) UpperCAmelCase : Union[str, Any] = retriever.retrieve(_snake_case, n_docs=_snake_case ) 
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ), 2 )
        self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ), _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]] )

    def __magic_name__ ( self : Tuple ):
        UpperCAmelCase : int = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                UpperCAmelCase : Optional[Any] = self.get_dummy_dataset()
                retriever.save_pretrained(_snake_case )
                UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
        self.assertIsInstance(_snake_case, _snake_case )
        UpperCAmelCase : Dict = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : int = retriever.retrieve(_snake_case, n_docs=1 )
        self.assertTrue(out is not None )

    def __magic_name__ ( self : Optional[int] ):
        UpperCAmelCase : Optional[int] = 1
        UpperCAmelCase : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        UpperCAmelCase : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : Dict = retriever.retrieve(_snake_case, n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ), 2 )
        self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ), _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]] )

    def __magic_name__ ( self : str ):
        UpperCAmelCase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
        self.assertIsInstance(_snake_case, _snake_case )
        UpperCAmelCase : Dict = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : str = retriever.retrieve(_snake_case, n_docs=1 )
        self.assertTrue(out is not None )

    def __magic_name__ ( self : Tuple ):
        UpperCAmelCase : Any = 1
        UpperCAmelCase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        UpperCAmelCase : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : List[str] = retriever.retrieve(_snake_case, n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ), 2 )
        self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ), _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]] )

    def __magic_name__ ( self : str ):
        UpperCAmelCase : Any = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
        self.assertIsInstance(_snake_case, _snake_case )
        UpperCAmelCase : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : int = retriever.retrieve(_snake_case, n_docs=1 )
        self.assertTrue(out is not None )

    def __magic_name__ ( self : List[Any] ):
        UpperCAmelCase : str = 1
        UpperCAmelCase : Dict = self.get_dummy_legacy_index_retriever()
        UpperCAmelCase : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : int = retriever.retrieve(_snake_case, n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ), 2 )
        self.assertEqual(sorted(doc_dicts[0] ), ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ), _snake_case )
        self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]] )

    def __magic_name__ ( self : Union[str, Any] ):
        UpperCAmelCase : Dict = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
        self.assertIsInstance(_snake_case, _snake_case )
        UpperCAmelCase : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : Dict = retriever.retrieve(_snake_case, n_docs=1 )
        self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __magic_name__ ( self : List[str] ):
        import torch

        UpperCAmelCase : int = 1
        UpperCAmelCase : Tuple = self.get_dummy_canonical_hf_index_retriever()
        UpperCAmelCase : Union[str, Any] = [[5, 7], [1_0, 1_1]]
        UpperCAmelCase : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : Tuple = retriever(_snake_case, _snake_case, prefix=retriever.config.generator.prefix, n_docs=_snake_case )
        UpperCAmelCase : int = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(_snake_case, _snake_case )
        self.assertIsInstance(_snake_case, _snake_case )
        self.assertIsInstance(_snake_case, np.ndarray )

        UpperCAmelCase : Optional[Any] = retriever(
            _snake_case, _snake_case, prefix=retriever.config.generator.prefix, n_docs=_snake_case, return_tensors='''pt''', )
        UpperCAmelCase : Tuple = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(_snake_case, torch.Tensor )
        self.assertIsInstance(_snake_case, torch.Tensor )
        self.assertIsInstance(_snake_case, torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __magic_name__ ( self : Dict ):
        UpperCAmelCase : List[str] = self.get_dpr_ctx_encoder_tokenizer()
        UpperCAmelCase : Dict = 1
        UpperCAmelCase : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        retriever.set_ctx_encoder_tokenizer(_snake_case )
        UpperCAmelCase : Any = [[5, 7], [1_0, 1_1]]
        UpperCAmelCase : Dict = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
        UpperCAmelCase : Union[str, Any] = retriever(_snake_case, _snake_case, prefix=retriever.config.generator.prefix, n_docs=_snake_case )
        self.assertEqual(
            len(_snake_case ), 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ), _snake_case )  # check for doc token related keys in dictionary
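The assertions above pin down the retrieval contract: scores are plain inner products between query and document embeddings, which is why the all-ones / all-minus-ones dummy queries always select doc ids [[1], [0]]. A standalone check of that scoring step (the embedding values are illustrative, not taken from the dummy dataset itself):

import numpy as np

vector_size = 4
doc_embeds = np.stack([-np.ones(vector_size), np.ones(vector_size)])  # doc 0, doc 1
queries = np.stack([np.ones(vector_size), -np.ones(vector_size)])     # two queries, as in the tests
scores = queries @ doc_embeds.T                                        # inner-product scores, shape (2, 2)
print(scores.argmax(axis=1))  # [1 0]: query 0 matches doc 1, query 1 matches doc 0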
336
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
16
0
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple: __lowerCAmelCase: Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' __lowerCAmelCase: List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("RGB" ) __lowerCAmelCase: Any = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __lowerCAmelCase: Dict = transform(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) return image def a__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]: if "visual_encoder" in key: __lowerCAmelCase: Any = re.sub("visual_encoder*" , "vision_model.encoder" , __lowerCamelCase ) if "blocks" in key: __lowerCAmelCase: List[str] = re.sub(R"blocks" , "layers" , __lowerCamelCase ) if "attn" in key: __lowerCAmelCase: List[str] = re.sub(R"attn" , "self_attn" , __lowerCamelCase ) if "norm1" in key: __lowerCAmelCase: Dict = re.sub(R"norm1" , "layer_norm1" , __lowerCamelCase ) if "norm2" in key: __lowerCAmelCase: Optional[Any] = re.sub(R"norm2" , "layer_norm2" , __lowerCamelCase ) if "encoder.norm" in key: __lowerCAmelCase: Union[str, Any] = re.sub(R"encoder.norm" , "post_layernorm" , __lowerCamelCase ) if "encoder.patch_embed.proj" in key: __lowerCAmelCase: int = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , __lowerCamelCase ) if "encoder.pos_embed" in key: __lowerCAmelCase: int = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , __lowerCamelCase ) if "encoder.cls_token" in key: __lowerCAmelCase: Dict = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , __lowerCamelCase ) if "self_attn" in key: __lowerCAmelCase: Optional[int] = re.sub(R"self_attn.proj" , "self_attn.projection" , __lowerCamelCase ) return key @torch.no_grad() def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Optional[int]: if config_path is not None: __lowerCAmelCase: Union[str, Any] = BlipConfig.from_pretrained(__lowerCamelCase ) else: __lowerCAmelCase: str = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) __lowerCAmelCase: Dict = BlipForConditionalGeneration(__lowerCamelCase ).eval() __lowerCAmelCase: Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' __lowerCAmelCase: Optional[int] = blip_decoder(pretrained=__lowerCamelCase , image_size=3_8_4 , vit="base" ) __lowerCAmelCase: Optional[int] = pt_model.eval() __lowerCAmelCase: Union[str, Any] = pt_model.state_dict() for key in modified_state_dict.copy(): __lowerCAmelCase: Any = modified_state_dict.pop(__lowerCamelCase ) __lowerCAmelCase: Union[str, Any] = rename_key(__lowerCamelCase ) __lowerCAmelCase: Union[str, Any] = value hf_model.load_state_dict(__lowerCamelCase ) __lowerCAmelCase: int = 3_8_4 __lowerCAmelCase: Optional[int] = load_demo_image(image_size=__lowerCamelCase , device="cpu" ) 
__lowerCAmelCase: str = BertTokenizer.from_pretrained("bert-base-uncased" ) __lowerCAmelCase: Dict = tokenizer(["a picture of"] ).input_ids __lowerCAmelCase: Tuple = hf_model.generate(__lowerCamelCase , __lowerCamelCase ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] __lowerCAmelCase: Tuple = hf_model.generate(__lowerCamelCase ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(__lowerCamelCase ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __lowerCAmelCase: int = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) __lowerCAmelCase: Optional[Any] = blip_vqa(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit="base" ) vqa_model.eval() __lowerCAmelCase: Optional[Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): __lowerCAmelCase: str = modified_state_dict.pop(__lowerCamelCase ) __lowerCAmelCase: Optional[Any] = rename_key(__lowerCamelCase ) __lowerCAmelCase: List[Any] = value __lowerCAmelCase: Tuple = BlipForQuestionAnswering(__lowerCamelCase ) hf_vqa_model.load_state_dict(__lowerCamelCase ) __lowerCAmelCase: Union[str, Any] = ['''How many dogs are in this image?'''] __lowerCAmelCase: Tuple = tokenizer(__lowerCamelCase , return_tensors="pt" ).input_ids __lowerCAmelCase: Optional[int] = hf_vqa_model.generate(__lowerCamelCase , __lowerCamelCase ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) __lowerCAmelCase: Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' __lowerCAmelCase: str = blip_itm(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit="base" ) itm_model.eval() __lowerCAmelCase: str = itm_model.state_dict() for key in modified_state_dict.copy(): __lowerCAmelCase: str = modified_state_dict.pop(__lowerCamelCase ) __lowerCAmelCase: List[Any] = rename_key(__lowerCamelCase ) __lowerCAmelCase: Union[str, Any] = value __lowerCAmelCase: Union[str, Any] = BlipForImageTextRetrieval(__lowerCamelCase ) __lowerCAmelCase: Any = ['''A picture of a woman with a dog sitting in a beach'''] __lowerCAmelCase: Optional[Any] = tokenizer( __lowerCamelCase , return_tensors="pt" , padding="max_length" , truncation=__lowerCamelCase , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(__lowerCamelCase ) hf_itm_model.eval() __lowerCAmelCase: Optional[Any] = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase ) __lowerCAmelCase: Any = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A = parser.parse_args() 
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
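The conversion script above hinges on rewriting every state-dict key with `re.sub` before calling `load_state_dict`. A trimmed sketch of that rename-and-reload pattern (only two of the script's rules are shown, and the dict value is a string stand-in for a tensor):

import re

def rename_key(key):
    # Two of the script's rules, for illustration; the full table has ten.
    if "visual_encoder" in key:
        key = re.sub(r"visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    return key

state_dict = {"visual_encoder.blocks.0.norm1.weight": "tensor-placeholder"}
renamed = {rename_key(k): v for k, v in state_dict.items()}
print(renamed)  # {'vision_model.encoder.layers.0.norm1.weight': 'tensor-placeholder'}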
217
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase_ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = "tapas" def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_snake_case ,**_snake_case ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Dict = type_vocab_sizes lowercase__ : Optional[Any] = initializer_range lowercase__ : Dict = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Any = positive_label_weight lowercase__ : int = num_aggregation_labels lowercase__ : List[str] = aggregation_loss_weight lowercase__ : Optional[int] = use_answer_as_supervision lowercase__ : Optional[Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : str = temperature lowercase__ : int = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : Union[str, Any] = average_approximation_function lowercase__ : Union[str, Any] = cell_selection_preference lowercase__ : Any = answer_loss_cutoff lowercase__ : List[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : int = average_logits_per_cell lowercase__ : str = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Any = init_cell_selection_weights_to_zero lowercase__ : 
Optional[int] = reset_position_index_per_cell lowercase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Optional[Any] = aggregation_labels lowercase__ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels ,_snake_case ): lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
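Beyond storing hyperparameters, the only logic in the config above is normalizing the keys of `aggregation_labels` to ints (JSON round-trips turn them into strings). That step in isolation, with an illustrative label map:

aggregation_labels = {"0": "NONE", "1": "SUM"}  # string keys, as after JSON deserialization
if isinstance(aggregation_labels, dict):
    aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
print(aggregation_labels)  # {0: 'NONE', 1: 'SUM'}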
16
0
'''simple docstring'''
import datasets


__lowercase : List[Any] = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'

__lowercase : Optional[Any] = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

__lowercase : Tuple = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict ):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
                } ) ,
            codebase_urls=[] ,
            reference_urls=[] ,
            format='numpy' ,
        )

    def __UpperCAmelCase ( self , __a , __a ):
        '''simple docstring'''
        return {"accuracy": simple_accuracy(_snake_case , _snake_case )}
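The metric above reduces to plain label-match accuracy over numpy arrays. A self-contained sketch of the same computation, assuming only numpy:

import numpy as np

def simple_accuracy(preds, labels):
    # identical to the metric body above: mean of elementwise label matches
    return (np.asarray(preds) == np.asarray(labels)).mean()

print(simple_accuracy([0, 1, 2, 1], [0, 1, 1, 1]))  # 0.75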
27
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]: """simple docstring""" lowercase__ : Dict = parent lowercase__ : Any = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : int = num_channels lowercase__ : Any = embed_dim lowercase__ : int = depths lowercase__ : Dict = num_heads lowercase__ : List[Any] = window_size lowercase__ : int = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Dict = drop_path_rate lowercase__ : int = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : Tuple = patch_norm lowercase__ : Tuple = layer_norm_eps lowercase__ : Optional[Any] = initializer_range lowercase__ : int = is_training lowercase__ : Optional[int] = scope lowercase__ : str = use_labels lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = encoder_stride def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps 
,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Optional[int] = 1 lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.type_sequence_label_size lowercase__ : Dict = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCAmelCase : Optional[int] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : Dict = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[Any] = SwinvaModelTester(self ) lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = True for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Dict = outputs.attentions lowercase__ : Any = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) ,_snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any] = True lowercase__ : Optional[Any] = config.window_size**2 lowercase__ : Any = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowercase__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : Tuple = True lowercase__ : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) if hasattr(self.model_tester ,'''num_hidden_states_types''' ): lowercase__ : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowercase__ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) ) lowercase__ : Optional[int] = outputs.attentions self.assertEqual(len(_snake_case ) ,_snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCAmelCase ( 
self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # Swinv2 has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape lowercase__ : int = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : str = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Optional[int] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Any ) -> List[str]: """simple docstring""" lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
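The hidden-state checks in the test file above rely on two small size computations: the patch count and the padded input size. A standalone check of both formulas, with the tester's default 32x32 image and 2x2 patches:

image_size, patch_size = (32, 32), (2, 2)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
# Note: as written in the test, padding is applied even when the size already divides evenly.
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
print(num_patches, padded_height, padded_width)  # 256 34 34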
16
0
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


A__ : Any =logging.getLogger(__name__)


class UpperCAmelCase ( A_ ):
    def __init__( self : Optional[int] , __snake_case : Tuple=-1 ) -> int:
        _lowerCAmelCase = label_idx

    def lowercase__ ( self : Any , __snake_case : int , __snake_case : Union[Split, str] ) -> List[InputExample]:
        if isinstance(_snake_case , _snake_case ):
            _lowerCAmelCase = mode.value
        _lowerCAmelCase = os.path.join(_snake_case , f"{mode}.txt" )
        _lowerCAmelCase = 1
        _lowerCAmelCase = []
        with open(_snake_case , encoding="""utf-8""" ) as f:
            _lowerCAmelCase = []
            _lowerCAmelCase = []
            for line in f:
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_snake_case , labels=_snake_case ) )
                        guid_index += 1
                        _lowerCAmelCase = []
                        _lowerCAmelCase = []
                else:
                    _lowerCAmelCase = line.split(""" """ )
                    words.append(splits[0] )
                    if len(_snake_case ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_snake_case , labels=_snake_case ) )
        return examples

    def lowercase__ ( self : str , __snake_case : TextIO , __snake_case : TextIO , __snake_case : List ) -> List[Any]:
        _lowerCAmelCase = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(_snake_case )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                _lowerCAmelCase = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
                writer.write(_snake_case )
            else:
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )

    def lowercase__ ( self : Tuple , __snake_case : str ) -> List[str]:
        if path:
            with open(_snake_case , """r""" ) as f:
                _lowerCAmelCase = f.read().splitlines()
            if "O" not in labels:
                _lowerCAmelCase = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class UpperCAmelCase ( A_ ):
    def __init__( self : str ) -> Any:
        super().__init__(label_idx=-2 )

    def lowercase__ ( self : str , __snake_case : str ) -> List[str]:
        if path:
            with open(_snake_case , """r""" ) as f:
                _lowerCAmelCase = f.read().splitlines()
            if "O" not in labels:
                _lowerCAmelCase = ['''O'''] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class UpperCAmelCase ( A_ ):
    def lowercase__ ( self : int , __snake_case : Optional[int] , __snake_case : Union[Split, str] ) -> List[InputExample]:
        if isinstance(_snake_case , _snake_case ):
            _lowerCAmelCase = mode.value
        _lowerCAmelCase = os.path.join(_snake_case , f"{mode}.txt" )
        _lowerCAmelCase = 1
        _lowerCAmelCase = []
        with open(_snake_case , encoding="""utf-8""" ) as f:
            for sentence in parse_incr(_snake_case ):
                _lowerCAmelCase = []
                _lowerCAmelCase = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                assert len(_snake_case ) == len(_snake_case )
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_snake_case , labels=_snake_case ) )
                    guid_index += 1
        return examples

    def lowercase__ ( self : List[Any] , __snake_case : TextIO , __snake_case : TextIO , __snake_case : List ) -> str:
        _lowerCAmelCase = 0
        for sentence in parse_incr(_snake_case ):
            _lowerCAmelCase = preds_list[example_id]
            _lowerCAmelCase = ''''''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(_snake_case )
            example_id += 1

    def lowercase__ ( self : List[str] , __snake_case : str ) -> List[str]:
        if path:
            with open(_snake_case , """r""" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
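The readers above split whitespace-delimited CoNLL lines into a token column and a label column, flushing a sentence on each blank line. A minimal sketch of that parsing loop on an in-memory example, with `label_idx` fixed to the second column:

lines = ["EU B-ORG\n", "rejects O\n", "\n", "Germany B-LOC\n"]
words, labels, sentences = [], [], []
for line in lines:
    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
        if words:  # blank line closes the current sentence
            sentences.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(" ")
        words.append(splits[0])
        labels.append(splits[1].replace("\n", "") if len(splits) > 1 else "O")
if words:  # flush the trailing sentence
    sentences.append((words, labels))
print(sentences)  # [(['EU', 'rejects'], ['B-ORG', 'O']), (['Germany'], ['B-LOC'])]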
70
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
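A usage sketch mirroring the wrapper above: it calls the same `nltk.translate.meteor_score.single_meteor_score` with pre-tokenized inputs and the default alpha/beta/gamma. The exact corpus downloads needed may vary with the installed nltk version (an assumption here):

import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download("wordnet")  # required by METEOR; newer nltk may also want "punkt" / "omw-1.4"
nltk.download("punkt")

reference = "It is a guide to action that ensures that the military will forever heed Party commands"
prediction = "It is a guide to action which ensures that the military always obeys the commands of the party"
score = meteor_score.single_meteor_score(
    word_tokenize(reference), word_tokenize(prediction), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))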
16
0