Dataset columns (viewer summary, min to max):

    code                       string   length 86 to 54.5k
    code_codestyle             int64    0 to 371
    style_context              string   length 87 to 49.2k
    style_context_codestyle    int64    0 to 349
    label                      int64    0 to 1
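Each row below is rendered cell by cell in column order: code, code_codestyle, style_context, style_context_codestyle, label. In every complete row shown, label is 1 exactly when code_codestyle equals style_context_codestyle. As a minimal sketch of loading a dataset with this schema and checking that relationship — assuming a Hugging Face datasets layout, with "user/codestyle-pairs" as a hypothetical placeholder repository id, not the dump's actual source:

# Minimal sketch, assuming the dump comes from a Hugging Face dataset;
# "user/codestyle-pairs" is a hypothetical repository id.
from datasets import load_dataset

ds = load_dataset("user/codestyle-pairs", split="train")
for row in ds.select(range(7)):
    # In the rows reproduced below, label == 1 coincides with matching codestyle ids.
    same_style = row["code_codestyle"] == row["style_context_codestyle"]
    print(row["code_codestyle"], row["style_context_codestyle"],
          row["label"], int(same_style))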
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = GPTaTokenizer _lowerCamelCase: Tuple = GPTaTokenizerFast _lowerCamelCase: str = True _lowerCamelCase: Any = {'''add_prefix_space''': True} _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**A_ : Tuple ) -> Any: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,**A_ : List[str] ) -> List[str]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ) -> Union[str, Any]: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'lower newer' A = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] A = tokenizer.tokenize(A_ ,add_prefix_space=A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + [tokenizer.unk_token] A = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: if not self.test_rust_tokenizer: return A = self.get_tokenizer() A = self.get_rust_tokenizer(add_prefix_space=A_ ) A = 'lower newer' # Testing tokenization A = tokenizer.tokenize(A_ ,add_prefix_space=A_ ) A = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) # Testing conversion to ids without special tokens A = tokenizer.encode(A_ ,add_special_tokens=A_ ,add_prefix_space=A_ ) A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ ) self.assertListEqual(A_ ,A_ ) # Testing conversion to ids with special tokens A = self.get_rust_tokenizer(add_prefix_space=A_ ) A = tokenizer.encode(A_ ,add_prefix_space=A_ ) A = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ ,A_ ) # Testing the unknown token A = tokens + [rust_tokenizer.unk_token] A = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> Any: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _SCREAMING_SNAKE_CASE 
( self : List[Any] ,A_ : List[Any]=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) # Simple input A = 'This is a simple input' A = ['This is a simple input 1', 'This is a simple input 2'] A = ('This is a simple input', 'This is a pair') A = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' ) # Simple input self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' ) # Simple input self.assertRaises( A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,) # Pair input self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' ) # Pair input self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' ) # Pair input self.assertRaises( A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' ) # Simple input A = 'This is a simple input' A = ['This is a simple input looooooooong', 'This is a simple input'] A = ('This is a simple input', 'This is a pair') A = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] A = tokenizer.pad_token_id A = tokenizer(A_ ,padding='max_length' ,max_length=30 ,return_tensors='np' ) A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' ) A = tokenizer(*A_ ,padding='max_length' ,max_length=60 ,return_tensors='np' ) A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] ,30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] ,33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] ,60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] ,52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = '$$$' A = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=A_ ,add_bos_token=A_ ) A = 'This is a simple input' A = ['This is a simple input 1', 'This is a simple input 2'] A = tokenizer.bos_token_id A = tokenizer(A_ ) A = tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] ,A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A = tokenizer.decode(out_s.input_ids ) A = tokenizer.batch_decode(out_sa.input_ids ) 
self.assertEqual(decode_s.split()[0] ,A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented A = [self.get_tokenizer(do_lower_case=A_ ,add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A = 'Encode this.' A = 'This one too please.' A = tokenizer.encode(A_ ,add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ ,add_special_tokens=A_ ) A = tokenizer.encode_plus( A_ ,A_ ,add_special_tokens=A_ ,return_special_tokens_mask=A_ ,) A = encoded_sequence_dict['input_ids'] A = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) ,len(A_ ) ) A = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] A = [x for x in filtered_sequence if x is not None] self.assertEqual(A_ ,A_ ) @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ ) A = 'A photo of a cat' A = tokenizer.encode( A_ ,) self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) A = AutoTokenizer.from_pretrained('./test_opt' ) A = tokenizer.encode( A_ ,) self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,use_slow=A_ ) A = 'A photo of a cat' A = tokenizer.encode( A_ ,) # Same as above self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ ) A = 'bos' A = tokenizer.get_vocab()['bos'] A = 'A photo of a cat' A = tokenizer.encode( A_ ,) # We changed the bos token self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) A = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) A = tokenizer.encode( A_ ,) self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] )
code_codestyle: 74
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class a_ (_a ): __lowerCAmelCase : List[Any] = """microsoft/speecht5_tts""" __lowerCAmelCase : List[Any] = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) __lowerCAmelCase : List[str] = """text_reader""" __lowerCAmelCase : Optional[Any] = SpeechTaProcessor __lowerCAmelCase : str = SpeechTaForTextToSpeech __lowerCAmelCase : int = SpeechTaHifiGan __lowerCAmelCase : int = ["""text"""] __lowerCAmelCase : int = ["""audio"""] def __UpperCamelCase ( self ): if self.post_processor is None: _lowerCAmelCase : int = """microsoft/speecht5_hifigan""" super().setup() def __UpperCamelCase ( self , snake_case_ , snake_case_=None ): _lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) _lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) _lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def __UpperCamelCase ( self , snake_case_ ): with torch.no_grad(): return self.model.generate_speech(**snake_case_ ) def __UpperCamelCase ( self , snake_case_ ): with torch.no_grad(): return self.post_processor(snake_case_ ).cpu().detach()
style_context_codestyle: 309
label: 0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = ShapEPipeline _a : Union[str, Any] = ["""prompt"""] _a : str = ["""prompt"""] _a : Optional[int] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] _a : List[Any] = False @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 8 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "num_attention_heads": 2, "attention_head_dim": 1_6, "embedding_dim": self.time_input_dim, "num_embeddings": 3_2, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __lowerCAmelCase = PriorTransformer(**_A ) return model @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "param_shapes": ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 1_2, "background": ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase = ShapERenderer(**_A ) return model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dummy_prior __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_renderer __lowerCAmelCase = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1_0_2_4 , prediction_type="sample" , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , ) __lowerCAmelCase = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) 
__lowerCAmelCase = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 3_2, "output_type": "np", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) ) __lowerCAmelCase = output.images[0] __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) __lowerCAmelCase = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device == "cpu" __lowerCAmelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_A , relax_max_difference=_A , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = 1 __lowerCAmelCase = 2 __lowerCAmelCase = self.get_dummy_inputs(_A ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase = batch_size * [inputs[key]] __lowerCAmelCase = pipe(**_A , num_images_per_prompt=_A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) __lowerCAmelCase = ShapEPipeline.from_pretrained("openai/shap-e" ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(0 ) __lowerCAmelCase = pipe( "a shark" , generator=_A , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="np" , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(_A , _A )
code_codestyle: 365
from pathlib import Path

import fire
from tqdm import tqdm


def _a ( SCREAMING_SNAKE_CASE_ : Dict="ro" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="en" , SCREAMING_SNAKE_CASE_ : Optional[Any]="wmt16" , SCREAMING_SNAKE_CASE_ : List[str]=None ):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets" )
    __lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
    print(F"""Converting {dataset}-{pair}""" )
    __lowerCAmelCase = datasets.load_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if save_dir is None:
        __lowerCAmelCase = F"""{dataset}-{pair}"""
    __lowerCAmelCase = Path(SCREAMING_SNAKE_CASE_ )
    save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    for split in ds.keys():
        print(F"""Splitting {split} with {ds[split].num_rows} records""" )
        # to save to val.source, val.target like summary datasets
        __lowerCAmelCase = "val" if split == "validation" else split
        __lowerCAmelCase = save_dir.joinpath(F"""{fn}.source""" )
        __lowerCAmelCase = save_dir.joinpath(F"""{fn}.target""" )
        __lowerCAmelCase = src_path.open("w+" )
        __lowerCAmelCase = tgt_path.open("w+" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            __lowerCAmelCase = x["translation"]
            src_fp.write(ex[src_lang] + "\n" )
            tgt_fp.write(ex[tgt_lang] + "\n" )
    print(F"""Saved {dataset} dataset to {save_dir}""" )


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
style_context_codestyle: 102
label: 0
import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __snake_case ( UpperCamelCase_ ,unittest.TestCase ): _a = PhobertTokenizer _a = False def UpperCAmelCase__ ( self : str): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ : Union[str, Any] = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@'''] lowerCAmelCase_ : Optional[int] = dict(zip(A_ , range(len(A_)))) lowerCAmelCase_ : Any = ['''#version: 0.2''', '''l à</w>'''] lowerCAmelCase_ : str = {'''unk_token''': '''<unk>'''} lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""") with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp: fp.write('''\n'''.join(A_)) def UpperCAmelCase__ ( self : Tuple , **A_ : Optional[Any]): kwargs.update(self.special_tokens_map) return PhobertTokenizer.from_pretrained(self.tmpdirname , **A_) def UpperCAmelCase__ ( self : str , A_ : int): lowerCAmelCase_ : Tuple = '''Tôi là VinAI Research''' lowerCAmelCase_ : Any = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int]): lowerCAmelCase_ : int = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) lowerCAmelCase_ : Optional[int] = '''Tôi là VinAI Research''' lowerCAmelCase_ : Tuple = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split() lowerCAmelCase_ : List[str] = tokenizer.tokenize(A_) print(A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : Dict = tokens + [tokenizer.unk_token] lowerCAmelCase_ : Dict = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_) , A_)
code_codestyle: 103
A__ : Any = '''Tobias Carryer'''

from time import time


class __snake_case :
    def __init__( self : Any , A_ : Tuple , A_ : Dict , A_ : Tuple , A_ : str=int(time())):  # noqa: B008
        lowerCAmelCase_ : int = multiplier
        lowerCAmelCase_ : int = increment
        lowerCAmelCase_ : str = modulo
        lowerCAmelCase_ : str = seed

    def UpperCAmelCase__ ( self : int):
        lowerCAmelCase_ : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    A__ : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
style_context_codestyle: 103
label: 1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase__ :List[Any] = logging.getLogger(__name__) def lowerCAmelCase__ ( a__: Dict , a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' return (preds == labels).mean() @dataclass class __a : _a : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __a : _a : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) _a : str = field(metadata={'help': 'Should contain the data files for the task.'} ) _a : int = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _a : bool = field( default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def lowerCAmelCase__ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , a__ ) # Set seed set_seed(training_args.seed ) try: _UpperCAmelCase = processors[data_args.task_name]() _UpperCAmelCase = processor.get_labels() _UpperCAmelCase = len(a__ ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , ) # Get datasets _UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=a__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=a__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(a__: EvalPrediction ) -> Dict: _UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(a__ , p.label_ids )} # Data collator _UpperCAmelCase = DataCollatorWithPadding(a__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase = Trainer( model=a__ , args=a__ , train_dataset=a__ , eval_dataset=a__ , compute_metrics=a__ , data_collator=a__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(a__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , a__ , a__ ) writer.write('%s = %s\n' % (key, value) ) results.update(a__ ) return results def lowerCAmelCase__ ( a__: int ) -> List[str]: '''simple docstring''' main() if __name__ == "__main__": main()
code_codestyle: 185
import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class __a ( UpperCAmelCase ): _a : Optional[int] = 'MCTCTFeatureExtractor' _a : int = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.feature_extractor _UpperCAmelCase = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) _UpperCAmelCase = kwargs.pop('raw_speech' ) else: _UpperCAmelCase = kwargs.pop('audio' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = kwargs.pop('sampling_rate' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = args[0] _UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: _UpperCAmelCase = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None: _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is None: return inputs elif audio is None: return encodings else: _UpperCAmelCase = encodings['input_ids'] return inputs def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = kwargs.pop('input_features' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = kwargs.pop('labels' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = args[0] _UpperCAmelCase = args[1:] if input_features is not None: _UpperCAmelCase = self.feature_extractor.pad(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if labels is not None: _UpperCAmelCase = self.tokenizer.pad(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if labels is None: return input_features elif input_features is None: return labels else: _UpperCAmelCase = labels['input_ids'] return input_features def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @contextmanager def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) _UpperCAmelCase = True _UpperCAmelCase = self.tokenizer yield _UpperCAmelCase = self.feature_extractor _UpperCAmelCase = False
style_context_codestyle: 185
label: 1
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase__ : Tuple = False lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[Any] = "ybelkada/fonts" def lowerCamelCase__ ( ): '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " "Pix2StructImageProcessor. Please upgrade torch." ) def lowerCamelCase__ ( _A , _A , _A ): '''simple docstring''' requires_backends(_A , ["torch"] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(_A , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _A , _A , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def lowerCamelCase__ ( _A , _A = 36 , _A = "black" , _A = "white" , _A = 5 , _A = 5 , _A = 5 , _A = 5 , _A = None , _A = None , ): '''simple docstring''' requires_backends(_A , "vision" ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=_A ) snake_case_ = "\n".join(_A ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(_A ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(_A , "Arial.TTF" ) snake_case_ = ImageFont.truetype(_A , encoding="UTF-8" , size=_A ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , _A ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , _A , _A ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new("RGB" , (image_width, image_height) , _A ) snake_case_ = ImageDraw.Draw(_A ) draw.text(xy=(left_padding, top_padding) , text=_A , fill=_A , font=_A ) return image def lowerCamelCase__ ( _A , _A , **_A ): '''simple docstring''' requires_backends(_A , "vision" ) # Convert to PIL image if necessary snake_case_ = to_pil_image(_A ) snake_case_ = render_text(_A , **_A ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(_A ) if infer_channel_dimension_format(_A ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(_A , ChannelDimension.LAST ) return new_image class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = ['''flattened_patches'''] def __init__( self : Optional[Any] , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : int = 20_48 , __lowercase : bool = False , **__lowercase : Dict , ): """simple docstring""" super().__init__(**__lowercase ) snake_case_ = patch_size if patch_size is not None else {"height": 16, "width": 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def snake_case__ ( self : str , __lowercase : np.ndarray , __lowercase : int , __lowercase : dict , **__lowercase : Optional[Any] ): """simple docstring""" requires_backends(self.extract_flattened_patches , "torch" ) _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(__lowercase , ChannelDimension.FIRST ) snake_case_ = torch.from_numpy(__lowercase ) snake_case_ , snake_case_ = patch_size["height"], patch_size["width"] snake_case_ , snake_case_ = get_image_size(__lowercase ) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) snake_case_ = max(min(math.floor(scale * image_height / patch_height ) , __lowercase ) , 1 ) snake_case_ = max(min(math.floor(scale * image_width / patch_width ) , __lowercase ) , 1 ) snake_case_ = max(num_feasible_rows * patch_height , 1 ) snake_case_ = max(num_feasible_cols * patch_width , 1 ) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=__lowercase , antialias=__lowercase , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(__lowercase , __lowercase , __lowercase ) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] snake_case_ = torch.arange(__lowercase ).reshape([rows, 1] ).repeat(1 , __lowercase ).reshape([rows * columns, 1] ) snake_case_ = torch.arange(__lowercase ).reshape([1, columns] ).repeat(__lowercase , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa ) snake_case_ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(__lowercase , [0, 0, 0, max_patches - (rows * columns)] ).float() snake_case_ = to_numpy_array(__lowercase ) return result def snake_case__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ): """simple docstring""" if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa ) # take mean across the whole `image` snake_case_ = np.mean(__lowercase ) snake_case_ = np.std(__lowercase ) snake_case_ = max(__lowercase , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(__lowercase , mean=__lowercase , std=__lowercase , **__lowercase ) def snake_case__ ( self : Optional[Any] , __lowercase : ImageInput , __lowercase : Optional[str] = None , __lowercase : bool = None , __lowercase : Optional[bool] = None , __lowercase : Optional[int] = None , __lowercase : Optional[Dict[str, int]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : str , ): """simple docstring""" snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get("data_format" , __lowercase ) is not None: raise ValueError("data_format is not an accepted input as the outputs are " ) snake_case_ = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(__lowercase ) for image in images] # All transformations expect numpy arrays. snake_case_ = [to_numpy_array(__lowercase ) for image in images] if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models." ) snake_case_ = kwargs.pop("font_bytes" , __lowercase ) snake_case_ = kwargs.pop("font_path" , __lowercase ) if isinstance(__lowercase , __lowercase ): snake_case_ = [header_text] * len(__lowercase ) snake_case_ = [ render_header(__lowercase , header_text[i] , font_bytes=__lowercase , font_path=__lowercase ) for i, image in enumerate(__lowercase ) ] if do_normalize: snake_case_ = [self.normalize(image=__lowercase ) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=__lowercase , max_patches=__lowercase , patch_size=__lowercase ) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] snake_case_ = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=__lowercase ) return encoded_outputs
code_codestyle: 187
from torch import nn


def lowerCamelCase__ ( _A ):
    '''simple docstring'''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}" )
style_context_codestyle: 187
label: 1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class snake_case__ ( unittest.TestCase ): def A_ ( self : Any ) -> Optional[int]: '''simple docstring''' __snake_case : Any = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __snake_case : int = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) ) self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) ) def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __snake_case : Optional[int] = get_activation('gelu' ) __snake_case : Union[str, Any] = get_activation('gelu_10' ) __snake_case : Tuple = torch_builtin(__a ) __snake_case : Tuple = geluaa(__a ) __snake_case : Union[str, Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__a ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(__a ): get_activation('bogus' ) with self.assertRaises(__a ): get_activation(__a ) def A_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = get_activation('gelu' ) __snake_case : Optional[Any] = 1 __snake_case : Tuple = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__a ): __snake_case : List[str] = acta.a
code_codestyle: 0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Tuple = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''t5''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : str = d_model __snake_case : str = d_kv __snake_case : List[Any] = d_ff __snake_case : List[str] = num_layers __snake_case : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Union[str, Any] = num_heads __snake_case : Tuple = relative_attention_num_buckets __snake_case : Optional[int] = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : str = layer_norm_epsilon __snake_case : List[str] = initializer_factor __snake_case : int = feed_forward_proj __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = self.feed_forward_proj.split('-' ) __snake_case : Dict = act_info[-1] __snake_case : List[str] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : Dict = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Union[str, Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __snake_case : Tuple = 'past_encoder_sequence + sequence' __snake_case : Dict = {0: 'batch'} __snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'} __snake_case : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property def A_ ( self : List[Any] ) -> int: '''simple docstring''' return 13
style_context_codestyle: 0
label: 1
import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) UpperCAmelCase__ : Optional[Any] = parser.parse_args() UpperCAmelCase__ : List[Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
code_codestyle: 121
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
26
0
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of integers, each encoding (as a product of primes) one
    distinct way of writing `number_to_partition` as a sum of primes.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in
    more than `number_unique_partitions` unique ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
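# A quick, illustrative sanity check of the product encoding used by
# `partition` above (not part of the original file): prime factorisation is
# unique, so a multiset of primes summing to n maps bijectively to its
# product, and len(partition(n)) counts the distinct prime partitions of n.
# Hand-checked: 10 = 7+3 = 5+5 = 5+3+2 = 3+3+2+2 = 2+2+2+2+2, whose
# products are 21, 25, 30, 36 and 32.
assert partition(10) == {21, 25, 30, 32, 36}
assert len(partition(10)) == 5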
362
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'

_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n  Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n  Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n  Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n  Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n  Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n  Kern, Robert and Larson, Eric and Carey, C J and\n  Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n  {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n  Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n  Harris, Charles R. and Archibald, Anne M. and\n  Ribeiro, Antonio H. and Pedregosa, Fabian and\n  {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n  Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
327
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
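# Illustrative check of the attribute_map defined above (not part of the
# original file; assumes `transformers` exports TrOCRConfig, which it does):
# on a PretrainedConfig, "hidden_size" is an alias for "d_model", so both
# names read the same value on a freshly constructed config.
from transformers import TrOCRConfig

config = TrOCRConfig()
assert config.hidden_size == config.d_model == 1024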
303
def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Exchange sort: for each position i, compare numbers[i] with every later
    element and swap whenever an inversion is found. In-place, O(n^2).
    """
    length = len(numbers)
    for i in range(length):
        for j in range(i + 1, length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
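# Illustrative checks of exchange_sort against Python's built-in `sorted`;
# the sample data below is made up and not part of the original file.
import random

sample = [random.randint(-100, 100) for _ in range(50)]
assert exchange_sort(list(sample)) == sorted(sample)
assert exchange_sort([]) == []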
133
0
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt: return True if `pattern` occurs in `text`, in
    O(len(text) + len(pattern)).
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    failure[k] is the length of the longest proper prefix of pattern[: k + 1]
    that is also a suffix of it.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
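# Worked example for the failure array (values verified by hand; not taken
# from the original file). For "ABABX" the longest proper prefix that is
# also a suffix of each prefix is:
#   "A" -> 0, "AB" -> 0, "ABA" -> 1 ("A"), "ABAB" -> 2 ("AB"), "ABABX" -> 0
# On a mismatch at pattern index j, kmp() resumes at failure[j - 1] instead
# of restarting, which is what makes the scan linear in the text length.
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]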
278
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __A = random.Random() def snake_case_(_UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ) -> Optional[int]: """simple docstring""" if rng is None: _snake_case = global_rng _snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowercase_ ( unittest.TestCase ): def __init__( self : List[Any] , A__ : List[Any] , A__ : int=7 , A__ : Tuple=400 , A__ : int=2000 , A__ : Any=2048 , A__ : List[Any]=128 , A__ : Optional[int]=1 , A__ : Optional[Any]=512 , A__ : Any=30 , A__ : Any=44100 , ) -> int: _snake_case = parent _snake_case = batch_size _snake_case = min_seq_length _snake_case = max_seq_length _snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case = spectrogram_length _snake_case = feature_size _snake_case = num_audio_channels _snake_case = hop_length _snake_case = chunk_length _snake_case = sampling_rate def UpperCamelCase_ ( self : str ) -> Optional[int]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCamelCase_ ( self : Any , A__ : Any=False , A__ : List[str]=False ) -> Tuple: def _flatten(A__ : List[str] ): return list(itertools.chain(*A__ ) ) if equal_length: _snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _snake_case = [np.asarray(A__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowercase_ ( __lowercase , unittest.TestCase ): UpperCamelCase_ : Optional[int] = TvltFeatureExtractor def UpperCamelCase_ ( self : Dict ) -> List[str]: _snake_case = TvltFeatureExtractionTester(self ) def UpperCamelCase_ ( self : int ) -> Optional[int]: _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(A__ , '''spectrogram_length''' ) ) self.assertTrue(hasattr(A__ , '''feature_size''' ) ) self.assertTrue(hasattr(A__ , '''num_audio_channels''' ) ) self.assertTrue(hasattr(A__ , '''hop_length''' ) ) self.assertTrue(hasattr(A__ , '''chunk_length''' ) ) self.assertTrue(hasattr(A__ , '''sampling_rate''' ) ) def UpperCamelCase_ ( self : Any ) -> Union[str, Any]: _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = feat_extract_first.save_pretrained(A__ )[0] check_json_file_has_correct_format(A__ ) _snake_case = self.feature_extraction_class.from_pretrained(A__ ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = 
dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def UpperCamelCase_ ( self : int ) -> Union[str, Any]: _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = os.path.join(A__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A__ ) _snake_case = self.feature_extraction_class.from_json_file(A__ ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def UpperCamelCase_ ( self : Union[str, Any] ) -> Any: # Initialize feature_extractor _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = [np.asarray(A__ ) for speech_input in speech_inputs] # Test not batched input _snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _snake_case = feature_extractor( A__ , return_tensors='''np''' , sampling_rate=44100 , mask_audio=A__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)] _snake_case = np.asarray(A__ ) _snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def UpperCamelCase_ ( self : Optional[Any] , A__ : Any ) -> Optional[int]: _snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _snake_case = ds.sort('''id''' ).select(range(A__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase_ ( self : List[str] ) -> Optional[Any]: _snake_case = self._load_datasamples(1 ) _snake_case = TvltFeatureExtractor() _snake_case = feature_extractor(A__ , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _snake_case = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A__ , atol=1e-4 ) )
278
1
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _snake_case : def __init__( self , a__ , a__=3 , a__=32 , a__=3 , a__=10 , a__=[8, 16, 32, 64] , a__=[1, 1, 2, 1] , a__=True , a__=True , a__="relu" , a__=3 , a__=None , a__=["stage2", "stage3", "stage4"] , a__=[2, 3, 4] , a__=1 , ) -> List[Any]: '''simple docstring''' snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = num_channels snake_case_ = embeddings_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_act snake_case_ = num_labels snake_case_ = scope snake_case_ = len(a__ ) snake_case_ = out_features snake_case_ = out_indices snake_case_ = num_groups def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[Any]: '''simple docstring''' snake_case_ = BitModel(config=a__ ) model.to(a__ ) model.eval() snake_case_ = model(a__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> List[Any]: '''simple docstring''' snake_case_ = self.num_labels snake_case_ = BitForImageClassification(a__ ) model.to(a__ ) model.eval() snake_case_ = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[int]: '''simple docstring''' snake_case_ = BitBackbone(config=a__ ) model.to(a__ ) model.eval() snake_case_ = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case_ = None snake_case_ = BitBackbone(config=a__ ) model.to(a__ ) model.eval() snake_case_ = model(a__ ) # verify 
feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ): lowerCAmelCase_ : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCAmelCase_ : int = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : Optional[int] = False lowerCAmelCase_ : Any = False lowerCAmelCase_ : Optional[Any] = False lowerCAmelCase_ : Optional[Any] = False def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = BitModelTester(self ) snake_case_ = ConfigTester(self , config_class=a__ , has_text_modality=a__ ) def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return @unittest.skip(reason="Bit does not output attentions" ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' pass def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(a__ ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , a__ ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(config=a__ ) for name, module in model.named_modules(): if isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 
) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' def check_hidden_states_output(a__ , a__ , a__ ): snake_case_ = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(a__ , a__ ) ) snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case_ = layer_type snake_case_ = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(a__ , a__ , a__ ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' pass def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = BitModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def UpperCamelCase_( ): '''simple docstring''' snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' snake_case_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ ) # forward pass with torch.no_grad(): snake_case_ = model(**a__ ) # verify the logits snake_case_ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a__ ) snake_case_ = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) ) @require_torch class _snake_case ( lowercase_ , unittest.TestCase ): lowerCAmelCase_ : int = (BitBackbone,) if is_torch_available() else () lowerCAmelCase_ : Optional[Any] = BitConfig lowerCAmelCase_ : Optional[Any] = False def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = BitModelTester(self )
85
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu _SCREAMING_SNAKE_CASE : Union[str, Any] = False class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return 12 @property def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' return 12 @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return 32 @property def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(a__ ) @property def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = 12 snake_case_ = 12 snake_case_ = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } snake_case_ = TransformeraDModel(**a__ ) return model def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = "cpu" snake_case_ = self.dummy_vqvae snake_case_ = self.dummy_text_encoder snake_case_ = self.dummy_tokenizer snake_case_ = self.dummy_transformer snake_case_ = VQDiffusionScheduler(self.num_embed ) snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ ) snake_case_ = VQDiffusionPipeline( vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = "teddy bear playing in the pool" snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" ) snake_case_ = output.images snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe( [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0] snake_case_ = image[0, -3:, -3:, -1] snake_case_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 
0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ = "cpu" snake_case_ = self.dummy_vqvae snake_case_ = self.dummy_text_encoder snake_case_ = self.dummy_tokenizer snake_case_ = self.dummy_transformer snake_case_ = VQDiffusionScheduler(self.num_embed ) snake_case_ = LearnedClassifierFreeSamplingEmbeddings( learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) snake_case_ = VQDiffusionPipeline( vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = "teddy bear playing in the pool" snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" ) snake_case_ = output.images snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe( [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0] snake_case_ = image[0, -3:, -3:, -1] snake_case_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) snake_case_ = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
85
1
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser UpperCAmelCase = re.compile(R'''\s+''') def lowerCamelCase (a_ :Dict) -> Optional[int]: return {"hash": hashlib.mda(re.sub(a_ , '''''' , example['''content''']).encode('''utf-8''')).hexdigest()} def lowerCamelCase (a_ :Tuple) -> Optional[int]: lowercase :List[Any] = [len(a_) for line in example['''content'''].splitlines()] return {"line_mean": np.mean(a_), "line_max": max(a_)} def lowerCamelCase (a_ :Optional[int]) -> Optional[int]: lowercase :List[Any] = np.mean([c.isalnum() for c in example['''content''']]) return {"alpha_frac": alpha_frac} def lowerCamelCase (a_ :Union[str, Any] , a_ :List[Any]) -> List[str]: if example["hash"] in uniques: uniques.remove(example['''hash''']) return True else: return False def lowerCamelCase (a_ :List[str] , a_ :List[str]=5) -> Optional[Any]: lowercase :List[str] = ['''auto-generated''', '''autogenerated''', '''automatically generated'''] lowercase :List[Any] = example['''content'''].splitlines() for _, line in zip(range(a_) , a_): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowerCamelCase (a_ :str , a_ :Optional[int]=5 , a_ :Optional[Any]=0.05) -> Tuple: lowercase :Union[str, Any] = ['''unit tests''', '''test file''', '''configuration file'''] lowercase :Tuple = example['''content'''].splitlines() lowercase :List[Any] = 0 lowercase :Optional[Any] = 0 # first test for _, line in zip(range(a_) , a_): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test lowercase :Tuple = example['''content'''].count('''\n''') lowercase :List[Any] = int(coeff * nlines) for line in lines: count_config += line.lower().count('''config''') count_test += line.lower().count('''test''') if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowerCamelCase (a_ :str) -> List[Any]: lowercase :List[Any] = ['''def ''', '''class ''', '''for ''', '''while '''] lowercase :Tuple = example['''content'''].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowerCamelCase (a_ :Any , a_ :Optional[int]=4) -> List[Any]: lowercase :Tuple = example['''content'''].splitlines() lowercase :Optional[int] = 0 for line in lines: counter += line.lower().count('''=''') if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowerCamelCase (a_ :str) -> Any: lowercase :List[Any] = tokenizer(example['''content'''] , truncation=a_)['''input_ids'''] lowercase :List[str] = len(example['''content''']) / len(a_) return {"ratio": ratio} def lowerCamelCase (a_ :Optional[int]) -> Tuple: lowercase :List[str] = {} results.update(get_hash(a_)) results.update(line_stats(a_)) results.update(alpha_stats(a_)) results.update(char_token_ratio(a_)) results.update(is_autogenerated(a_)) results.update(is_config_or_test(a_)) results.update(has_no_keywords(a_)) results.update(has_few_assignments(a_)) return results def lowerCamelCase (a_ :Optional[int] , a_ :int , a_ :Optional[int]) -> List[str]: if not check_uniques(a_ , a_): 
return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowerCamelCase (a_ :List[str]) -> int: with open(a_ , '''rb''') as f_in: with gzip.open(str(a_) + '''.gz''' , '''wb''' , compresslevel=6) as f_out: shutil.copyfileobj(a_ , a_) os.unlink(a_) # Settings UpperCAmelCase = HfArgumentParser(PreprocessingArguments) UpperCAmelCase = parser.parse_args() if args.num_workers is None: UpperCAmelCase = multiprocessing.cpu_count() UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset UpperCAmelCase = time.time() UpperCAmelCase = load_dataset(args.dataset_name, split='''train''') print(F"""Time to load dataset: {time.time()-t_start:.2f}""") # Run preprocessing UpperCAmelCase = time.time() UpperCAmelCase = ds.map(preprocess, num_proc=args.num_workers) print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""") # Deduplicate hashes UpperCAmelCase = set(ds.unique('''hash''')) UpperCAmelCase = len(uniques) / len(ds) print(F"""Fraction of duplicates: {1-frac:.2%}""") # Deduplicate data and apply heuristics UpperCAmelCase = time.time() UpperCAmelCase = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"""Time to filter dataset: {time.time()-t_start:.2f}""") print(F"""Size of filtered dataset: {len(ds_filter)}""") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: UpperCAmelCase = time.time() UpperCAmelCase , UpperCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""") print(F"""Size of deduplicate dataset: {len(ds_filter)}""") # Save data in batches of samples_per_file UpperCAmelCase = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) UpperCAmelCase = output_dir / '''data''' data_dir.mkdir(exist_ok=True) UpperCAmelCase = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): UpperCAmelCase = str(data_dir / F"""file-{file_number+1:012}.json""") UpperCAmelCase = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
368
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase (a_ :Optional[int] , a_ :Union[str, Any] , a_ :Optional[Any]=None) -> List[Any]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" lowercase :int = nn.Parameter(a_) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" lowercase :Tuple = nn.Parameter(a_) def lowerCamelCase (a_ :int , a_ :Any , a_ :Optional[int]) -> List[Any]: # set torch weights for 1-to-1 comparison lowercase :str = np.asarray(weights[0]) lowercase :List[Any] = np.asarray(weights[1]) lowercase :Optional[int] = np.asarray(weights[2]) set_param( torch_layer.self_attention.query_key , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.value , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.output.dense , torch.tensor(a_).view(-1 , a_).contiguous().transpose(0 , 1) , ) def lowerCamelCase (a_ :str , a_ :Any , a_ :Union[str, Any]) -> Dict: # set torch weights for 1-to-1 comparison lowercase :str = np.asarray(weights[0]) lowercase :Dict = np.asarray(weights[1]) lowercase :Dict = np.asarray(weights[2]) lowercase :Optional[Any] = np.asarray(weights[3]) set_param( torch_layer.self_attention.query , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.key , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.value , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.output.dense , torch.tensor(a_).view(-1 , a_).contiguous().transpose(0 , 1) , ) def lowerCamelCase (a_ :Union[str, Any] , a_ :Dict , a_ :Optional[int]) -> Optional[Any]: # layernorm 1 lowercase :Optional[int] = weights[0][0][0] lowercase :Union[str, Any] = np.asarray(layer_norm_a[0]) lowercase :List[str] = np.asarray(layer_norm_a[1]) set_param( torch_block.attention.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # lsh weights + output lowercase :Optional[Any] = weights[0][1] if len(a_) < 4: set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_) else: set_layer_weights_in_torch_local(a_ , torch_block.attention , a_) # intermediate weighs lowercase :Optional[int] = weights[2][0][1][2] # Chunked Feed Forward if len(a_) == 4: lowercase :int = intermediate_weights[2] # layernorm 2 lowercase :int = np.asarray(intermediate_weights[0][0]) lowercase :Union[str, Any] = np.asarray(intermediate_weights[0][1]) set_param( torch_block.feed_forward.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # intermediate dense lowercase :Dict = np.asarray(intermediate_weights[1][0]) lowercase :Optional[Any] = np.asarray(intermediate_weights[1][1]) set_param( torch_block.feed_forward.dense.dense , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) # intermediate out lowercase :Union[str, Any] = np.asarray(intermediate_weights[4][0]) lowercase :Tuple = np.asarray(intermediate_weights[4][1]) set_param( torch_block.feed_forward.output.dense , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) def lowerCamelCase (a_ :Tuple , a_ :Dict , a_ :Tuple) -> str: # reformer model lowercase :Union[str, Any] = torch_model.reformer # word embeds 
lowercase :Tuple = np.asarray(weights[1]) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_) , ) if isinstance(weights[3] , a_): lowercase :str = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights)): lowercase :List[str] = np.asarray(weights[3][emb_idx][0]) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" lowercase :int = nn.Parameter(torch.tensor(a_)) lowercase :Dict = weights[5] assert len(torch_model_reformer.encoder.layers) * 4 == len( a_), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers): lowercase :Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(a_ , a_ , a_) # output layer norm lowercase :Dict = np.asarray(weights[7][0]) lowercase :Optional[Any] = np.asarray(weights[7][1]) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # output embeddings lowercase :str = np.asarray(weights[9][0]) lowercase :Union[str, Any] = np.asarray(weights[9][1]) set_param( torch_model.lm_head.decoder , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) def lowerCamelCase (a_ :Optional[Any] , a_ :List[Any] , a_ :Tuple) -> Union[str, Any]: # Initialise PyTorch model lowercase :Optional[Any] = ReformerConfig.from_json_file(a_) print(F"""Building PyTorch model from configuration: {config}""") lowercase :Dict = ReformerModelWithLMHead(a_) with open(a_ , '''rb''') as f: lowercase :Tuple = pickle.load(a_)['''weights'''] set_model_weights_in_torch(a_ , a_ , config.hidden_size) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""") torch.save(model.state_dict() , a_) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
172
0
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
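# Illustrative check of the deprecation shim above (not part of the original
# file; assumes `transformers` is installed and that the class, like
# CLIPImageProcessor, is constructible with default arguments):
import warnings

from transformers import CLIPFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    CLIPFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)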
1
"""simple docstring""" from math import isqrt, loga def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list[int]: '''simple docstring''' lowercase_ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __lowerCAmelCase , __lowerCAmelCase ): lowercase_ = False return [i for i in range(2 , __lowerCAmelCase ) if is_prime[i]] def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 80_08_00 , __lowerCAmelCase = 80_08_00 ) -> int: '''simple docstring''' lowercase_ = degree * loga(__lowerCAmelCase ) lowercase_ = int(__lowerCAmelCase ) lowercase_ = calculate_prime_numbers(__lowerCAmelCase ) lowercase_ = 0 lowercase_ = 0 lowercase_ = len(__lowerCAmelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"{solution() = }")
136
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) lowerCamelCase :Optional[Any] = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Any = """sew-d""" def __init__(self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase=2 , lowercase=512 , lowercase=256 , lowercase=True , lowercase=True , lowercase=("p2c", "c2p") , lowercase="layer_norm" , lowercase="gelu_python" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-7 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase=False , lowercase=128 , lowercase=16 , lowercase=True , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=256 , lowercase=0 , lowercase=1 , lowercase=2 , **lowercase , ): super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) A_ : Tuple = hidden_size A_ : Dict = feat_extract_norm A_ : Tuple = feat_extract_activation A_ : str = list(lowercase_ ) A_ : Tuple = list(lowercase_ ) A_ : List[str] = list(lowercase_ ) A_ : Dict = conv_bias A_ : int = num_conv_pos_embeddings A_ : str = num_conv_pos_embedding_groups A_ : int = len(self.conv_dim ) A_ : List[str] = num_hidden_layers A_ : Optional[int] = intermediate_size A_ : Tuple = squeeze_factor A_ : Tuple = max_position_embeddings A_ : Tuple = position_buckets A_ : Optional[int] = share_att_key A_ : Dict = relative_attention A_ : List[Any] = norm_rel_ebd A_ : Optional[int] = list(lowercase_ ) A_ : List[str] = hidden_act A_ : Union[str, Any] = num_attention_heads A_ : List[str] = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Optional[Any] = activation_dropout A_ : Tuple = feat_proj_dropout A_ : Optional[int] = final_dropout A_ : Optional[int] = layer_norm_eps A_ : Union[str, Any] = feature_layer_norm_eps A_ : Optional[Any] = initializer_range A_ : int = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Optional[int] = apply_spec_augment A_ : Dict = mask_time_prob A_ : Optional[Any] = mask_time_length A_ : List[str] = mask_time_min_masks A_ : Dict = mask_feature_prob A_ : int = mask_feature_length A_ : Any = mask_feature_min_masks # ctc loss A_ : Tuple = ctc_loss_reduction A_ : Tuple = ctc_zero_infinity # sequence classification A_ : Any = use_weighted_layer_sum A_ : Union[str, Any] = classifier_proj_size @property def _a (self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
351
'''simple docstring'''
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were"
            " conceded to England at that favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
135
0
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Dump the current git sha and branch to `git_log.json` in `folder_path`."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for numpy and torch."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
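# Minimal illustration of the CPU-only path of init_gpu_params (the
# namespace fields here are made up for the example). The multi-GPU path
# instead reads WORLD_SIZE / N_GPU_NODE / RANK / N_NODES / NODE_RANK from
# the environment, as set by the distributed launcher.
from types import SimpleNamespace

params = SimpleNamespace(n_gpu=0, local_rank=-1)
init_gpu_params(params)
assert params.is_master and not params.multi_gpu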
101
"""simple docstring""" import qiskit def __lowerCamelCase ( a_ : int , a_ : int ) -> qiskit.result.counts.Counts: __SCREAMING_SNAKE_CASE :Tuple = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register __SCREAMING_SNAKE_CASE :Union[str, Any] = qiskit.QuantumCircuit(a_ , a_ ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator __SCREAMING_SNAKE_CASE :Tuple = qiskit.execute(a_ , a_ , shots=10_00 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(a_ ) if __name__ == "__main__": lowerCamelCase_ = single_qubit_measure(2, 2) print(f'Total count for various states are: {counts}')
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : List[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[int] = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Dict = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return the value of the Möbius function for ``number``."""
    factors = prime_factors(number)
    if is_square_free(factors):
        # (-1)^k for k distinct prime factors
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
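For reference, a self-contained sketch of the same Möbius logic that does not rely on the `maths.*` helpers (this helper-free formulation is an addition, not part of the file above): mu(n) is 0 when n has a squared prime factor, otherwise (-1)^k for k distinct prime factors.

def mobius_standalone(n: int) -> int:
    # Trial division; bail out with 0 on any squared prime factor.
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared factor -> not square-free
                return 0
            result = -result
        p += 1
    if n > 1:  # one leftover prime factor
        result = -result
    return result


assert [mobius_standalone(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]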
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
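A quick numerical cross-check of what these tests rely on, assuming only that torch is installed: the exact GELU is x * Phi(x) = x * 0.5 * (1 + erf(x / sqrt(2))), which is what PyTorch's built-in gelu computes by default.

import math

import torch

x = torch.linspace(-3, 3, 7)
manual = x * 0.5 * (1 + torch.erf(x / math.sqrt(2)))
assert torch.allclose(torch.nn.functional.gelu(x), manual, atol=1e-6)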
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = '''t5''' __snake_case = ['''past_key_values'''] __snake_case = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=32_128 , __UpperCAmelCase : List[Any]=512 , __UpperCAmelCase : Dict=64 , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : int=6 , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=8 , __UpperCAmelCase : str=32 , __UpperCAmelCase : Tuple=128 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=1e-6 , __UpperCAmelCase : int=1.0 , __UpperCAmelCase : List[str]="relu" , __UpperCAmelCase : int=True , __UpperCAmelCase : int=True , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : int=1 , **__UpperCAmelCase : str , ) ->Optional[Any]: """simple docstring""" a = vocab_size a = d_model a = d_kv a = d_ff a = num_layers a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a = num_heads a = relative_attention_num_buckets a = relative_attention_max_distance a = dropout_rate a = layer_norm_epsilon a = initializer_factor a = feed_forward_proj a = use_cache a = self.feed_forward_proj.split('''-''' ) a = act_info[-1] a = act_info[0] == '''gated''' if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a = '''gelu_new''' super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , ) class lowercase_ ( lowercase ): '''simple docstring''' @property def __lowerCAmelCase ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" a = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: a = '''past_encoder_sequence + sequence''' a = {0: '''batch'''} a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a = {0: '''batch''', 1: '''decoder_sequence'''} a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) return common_inputs @property def __lowerCAmelCase ( self : Union[str, Any] ) ->int: """simple docstring""" return 13
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = 2 while True: if is_prime(_UpperCamelCase ): yield num num += 1 def _lowerCamelCase ( _UpperCamelCase = 200_0000 ): '''simple docstring''' return sum(takewhile(lambda _UpperCamelCase : x < n , prime_generator() ) ) if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=() , _UpperCamelCase=None , _UpperCamelCase="no" , _UpperCamelCase="29500" ): '''simple docstring''' __lowerCAmelCase = False __lowerCAmelCase = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): __lowerCAmelCase = True elif "IPython" in sys.modules: __lowerCAmelCase = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: __lowerCAmelCase = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , _UpperCamelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: __lowerCAmelCase = 8 __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , distributed_type="TPU" ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*_UpperCamelCase ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_UpperCamelCase , master_addr="127.0.01" , master_port=_UpperCamelCase , mixed_precision=_UpperCamelCase ): __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , distributed_type="MULTI_GPU" ) print(f"Launching training on {num_processes} GPUs." ) try: start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
" "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): __lowerCAmelCase = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=() , _UpperCamelCase=2 ): '''simple docstring''' from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_UpperCamelCase , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ): __lowerCAmelCase = PrepareForLaunch(_UpperCamelCase , debug=_UpperCamelCase ) start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method="fork" )
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def a_ ( lowerCamelCase ): return EnvironmentCommand() class snake_case ( __UpperCAmelCase ): """simple docstring""" @staticmethod def __lowerCAmelCase ( lowerCamelCase__ : ArgumentParser ): UpperCAmelCase__ = parser.add_parser('env' ) download_parser.set_defaults(func=lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[int] ): UpperCAmelCase__ = huggingface_hub.__version__ UpperCAmelCase__ = 'not installed' UpperCAmelCase__ = 'NA' if is_torch_available(): import torch UpperCAmelCase__ = torch.__version__ UpperCAmelCase__ = torch.cuda.is_available() UpperCAmelCase__ = 'not installed' if is_transformers_available(): import transformers UpperCAmelCase__ = transformers.__version__ UpperCAmelCase__ = 'not installed' if is_accelerate_available(): import accelerate UpperCAmelCase__ = accelerate.__version__ UpperCAmelCase__ = 'not installed' if is_xformers_available(): import xformers UpperCAmelCase__ = xformers.__version__ UpperCAmelCase__ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f'''{pt_version} ({pt_cuda_available})''', 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(lowerCamelCase__ ) ) return info @staticmethod def __lowerCAmelCase ( lowerCamelCase__ : Any ): return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer A__ : List[Any] = logging.get_logger(__name__) A__ : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A__ : int = { 'vocab_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-german-cased': ( 'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json' ), 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json' ), }, } A__ : Optional[Any] = { 'distilbert-base-uncased': 5_12, 'distilbert-base-uncased-distilled-squad': 5_12, 'distilbert-base-cased': 5_12, 'distilbert-base-cased-distilled-squad': 5_12, 'distilbert-base-german-cased': 5_12, 'distilbert-base-multilingual-cased': 5_12, } A__ : List[str] = { 'distilbert-base-uncased': {'do_lower_case': True}, 'distilbert-base-uncased-distilled-squad': {'do_lower_case': True}, 'distilbert-base-cased': {'do_lower_case': False}, 'distilbert-base-cased-distilled-squad': {'do_lower_case': False}, 'distilbert-base-german-cased': {'do_lower_case': False}, 'distilbert-base-multilingual-cased': {'do_lower_case': False}, } class _UpperCAmelCase ( A__ ): """simple docstring""" lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = DistilBertTokenizer def __init__( self : List[Any], lowerCamelCase : List[Any]=None, lowerCamelCase : Dict=None, lowerCamelCase : str=True, lowerCamelCase : Optional[int]="[UNK]", lowerCamelCase : Optional[Any]="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Any="[CLS]", lowerCamelCase : Union[str, Any]="[MASK]", lowerCamelCase : str=True, lowerCamelCase : int=None, **lowerCamelCase : Union[str, Any], ): '''simple docstring''' super().__init__( lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, ) 
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars ): lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ = tokenize_chinese_chars lowercase__ = normalizer_class(**lowerCamelCase ) lowercase__ = do_lower_case def lowercase__ ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any]=None ): '''simple docstring''' lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ): '''simple docstring''' lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase ) return tuple(lowerCamelCase )
'''simple docstring'''

import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
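A hypothetical wiring sketch for the pipeline above (the task name and checkpoint are assumptions, not part of the file): transformers exposes `PIPELINE_REGISTRY.register_pipeline` so a custom `Pipeline` subclass can be served through the usual `pipeline()` factory.

from transformers import AutoModelForSequenceClassification, pipeline
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",  # hypothetical task name
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")  # checkpoint is an assumption
print(classifier("I like you.", second_text="I love you."))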
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}


if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
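For context, a minimal sketch of what `_LazyModule` buys, assuming nothing beyond the standard library: PEP 562's module-level `__getattr__` can defer the real import until the attribute is first touched.

import importlib

_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}


def __getattr__(name):
    # Import the submodule lazily on first attribute access.
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")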
'''simple docstring'''

import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    return f"Your date {date_input}, is a {days[str(f)]}!"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    zeller(args.date_input)
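Example calls, assuming the fixed function above; note the function already cross-validates its own arithmetic against `datetime.date(...).weekday()`, so it raises instead of returning a wrong weekday.

assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"
assert zeller("02/01/2010") == "Your date 02/01/2010, is a Monday!"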
'''simple docstring'''

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
'''simple docstring'''

import torch

from diffusers import DiffusionPipeline


class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # Dummy result: always a tensor of ones, handy for testing pipeline loading.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
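A hypothetical loading sketch (the checkpoint and the local directory are assumptions, not part of the file above): diffusers can load a one-file pipeline like this through the `custom_pipeline` argument.

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",         # any repo providing a unet + scheduler
    custom_pipeline="./my_pipeline",  # hypothetical local dir containing a pipeline.py with the class above
)
result = pipe()  # a tensor of ones shaped like one unet sample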
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) def _A ( lowercase , lowercase=False ): """simple docstring""" a =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" a =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _A ( lowercase , lowercase , lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: a ='''''' else: a ='''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) a =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a =in_proj_weight[ : config.hidden_size, : ] a =in_proj_bias[: config.hidden_size] a =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a =in_proj_weight[ -config.hidden_size :, : ] a =in_proj_bias[-config.hidden_size :] def _A ( lowercase ): """simple docstring""" a 
=['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_A , _A ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =dct.pop(_A ) a =val def _A ( ): """simple docstring""" a ='''http://images.cocodataset.org/val2017/000000039769.jpg''' a =Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def _A ( lowercase , lowercase ): """simple docstring""" a =ViTConfig() a =False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": a =True a =int(vit_name[-12:-10] ) a =int(vit_name[-9:-6] ) else: a =10_00 a ='''huggingface/label-files''' a ='''imagenet-1k-id2label.json''' a =json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) a ={int(_A ): v for k, v in idalabel.items()} a =idalabel a ={v: k for k, v in idalabel.items()} a =int(vit_name[-6:-4] ) a =int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): a =1_92 a =7_68 a =12 a =3 elif vit_name[9:].startswith('''small''' ): a =3_84 a =15_36 a =12 a =6 else: pass else: if vit_name[4:].startswith('''small''' ): a =7_68 a =23_04 a =8 a =8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): a =10_24 a =40_96 a =24 a =16 elif vit_name[4:].startswith('''huge''' ): a =12_80 a =51_20 a =32 a =16 # load original model from timm a =timm.create_model(_A , pretrained=_A ) timm_model.eval() # load state_dict of original model, remove and rename some keys a =timm_model.state_dict() if base_model: remove_classification_head_(_A ) a =create_rename_keys(_A , _A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , _A , _A ) # load HuggingFace model if vit_name[-5:] == "in21k": a =ViTModel(_A ).eval() else: a =ViTForImageClassification(_A ).eval() model.load_state_dict(_A ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: a =DeiTImageProcessor(size=config.image_size ) else: a =ViTImageProcessor(size=config.image_size ) a =image_processor(images=prepare_img() , return_tensors='''pt''' ) a =encoding['''pixel_values'''] a =model(_A ) if base_model: a =timm_model.forward_features(_A ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_A , outputs.pooler_output , atol=1E-3 ) else: a =timm_model(_A ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_A , outputs.logits , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_A ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_A ) if __name__ == "__main__": lowerCamelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCamelCase_ : Any = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
from string import ascii_uppercase

# Letter -> index and index -> letter lookup tables.
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the extended key (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
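A round-trip check, assuming the functions above: decrypting with the same extended key must reproduce the plaintext, since (p - k) % 26 followed by (c + k) % 26 is the identity letter by letter.

message, key = "THE GERMAN ATTACK", "SECRET"
key_new = generate_key(message, key)
encrypted = cipher_text(message, key_new)
assert original_text(encrypted, key_new) == message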
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A_ : List[str] ={"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[int] =["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[int] =["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys A_ : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' a__ : Any = DDIMPipeline a__ : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS a__ : str = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } a__ : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS a__ : List[Any] = False def UpperCamelCase__ ( self) -> Optional[Any]: torch.manual_seed(0) __UpperCamelCase :Optional[int] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) __UpperCamelCase :Union[str, Any] = DDIMScheduler() __UpperCamelCase :str = {'''unet''': unet, '''scheduler''': scheduler} return components def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[Any]: if str(__lowercase).startswith('''mps'''): __UpperCamelCase :Optional[Any] = torch.manual_seed(__lowercase) else: __UpperCamelCase :Dict = torch.Generator(device=__lowercase).manual_seed(__lowercase) __UpperCamelCase :List[str] = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__ ( self) -> List[Any]: __UpperCamelCase :str = '''cpu''' __UpperCamelCase :Tuple = self.get_dummy_components() __UpperCamelCase :Optional[Any] = self.pipeline_class(**__lowercase) pipe.to(__lowercase) pipe.set_progress_bar_config(disable=__lowercase) __UpperCamelCase :Any = self.get_dummy_inputs(__lowercase) __UpperCamelCase :Optional[Any] = pipe(**__lowercase).images __UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3)) __UpperCamelCase :Optional[int] = np.array( [1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4]) __UpperCamelCase :str = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(__lowercase , 1E-3) def UpperCamelCase__ ( self) -> Union[str, Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) def UpperCamelCase__ ( self) -> Optional[int]: super().test_save_load_local(expected_max_difference=3E-3) def UpperCamelCase__ ( self) -> List[Any]: super().test_save_load_optional_components(expected_max_difference=3E-3) def UpperCamelCase__ ( self) -> Any: super().test_inference_batch_single_identical(expected_max_diff=3E-3) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :List[str] = '''google/ddpm-cifar10-32''' __UpperCamelCase :Union[str, Any] = UNetaDModel.from_pretrained(__lowercase) __UpperCamelCase :str = DDIMScheduler() __UpperCamelCase :Union[str, Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase) ddim.to(__lowercase) ddim.set_progress_bar_config(disable=__lowercase) __UpperCamelCase :List[Any] = torch.manual_seed(0) __UpperCamelCase :Any = ddim(generator=__lowercase , eta=0.0 , 
output_type='''numpy''').images __UpperCamelCase :int = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCamelCase :int = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def UpperCamelCase__ ( self) -> List[Any]: __UpperCamelCase :Optional[Any] = '''google/ddpm-ema-bedroom-256''' __UpperCamelCase :List[str] = UNetaDModel.from_pretrained(__lowercase) __UpperCamelCase :Dict = DDIMScheduler.from_pretrained(__lowercase) __UpperCamelCase :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase) ddpm.to(__lowercase) ddpm.set_progress_bar_config(disable=__lowercase) __UpperCamelCase :Dict = torch.manual_seed(0) __UpperCamelCase :str = ddpm(generator=__lowercase , output_type='''numpy''').images __UpperCamelCase :Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCamelCase :Tuple = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
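A tiny local check of `tokenize`, assuming any pretrained tokenizer stands in for `args.tokenizer_dir` (gpt2 here is an assumption): the character/token ratio should comfortably exceed 1 for ordinary source code.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumption: stand-in for args.tokenizer_dir
example = {"content": "def add(a, b):\n    return a + b\n"}
out = tokenize(example)
assert out["ratio_char_token"] > 1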
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
13
"""
Project Euler problem 44: find the pair of pentagonal numbers P_j and P_k for
which both P_j + P_k and P_j - P_k are pentagonal, and D = |P_j - P_k| is
minimised; return D.
"""


def is_pentagonal(n: int) -> bool:
    """Returns True if n is a pentagonal number, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the minimal difference of a pair of pentagonal numbers whose sum
    and difference are also pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
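A quick sanity check for the helper above; the first few pentagonal numbers P_n = n(3n - 1)/2 are 1, 5, 12, 22, 35, so `is_pentagonal` should accept those and reject nearby non-pentagonal values. A minimal sketch, not part of the original file:

# Hypothetical check, assuming the functions above are in scope.
for n, expected in [(1, True), (5, True), (12, True), (22, True), (35, True), (4, False), (6, False)]:
    assert is_pentagonal(n) == expected, n
print("is_pentagonal behaves as expected on the first few pentagonal numbers")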
13
1
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)"
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
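A minimal usage sketch for the processor above; the checkpoint name and the dummy waveform are illustrative assumptions, not part of the original file:

# Hypothetical usage, assuming transformers is installed and the
# "facebook/s2t-small-librispeech-asr" checkpoint is reachable.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=waveform, sampling_rate=16000, text="hello world", return_tensors="pt")
print(batch.keys())  # typically input_features, attention_mask, and labels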
11
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
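A small smoke-test sketch for the module above; the reduced channel counts and the `apply` call are illustrative assumptions, and the shapes follow the `init_weights` logic in the file:

# Hypothetical usage, assuming the module is importable from diffusers with flax installed.
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=32, block_out_channels=(32, 64, 64, 64), cross_attention_dim=64, attention_head_dim=2
)
params = unet.init_weights(jax.random.PRNGKey(0))
sample = jnp.zeros((1, unet.in_channels, unet.sample_size, unet.sample_size))
timesteps = jnp.ones((1,), dtype=jnp.int32)
context = jnp.zeros((1, 1, 64))
out = unet.apply({"params": params}, sample, timesteps, context)
print(out.sample.shape)  # expected (1, 4, 32, 32) with the default out_channels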
56
0
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error (hypothesis value minus actual output) for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output of the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value of the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sums the error terms (times the feature at `index` when index != -1) over the data."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Cost derivative w.r.t. one parameter; index -1 corresponds to the bias term."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
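For the initial parameter_vector [2, 4, 1, 5] and the first training input (5, 2, 3), the hypothesis evaluates to 2 + 4*5 + 1*2 + 5*3 = 39, so the starting error on that example is 39 - 15 = 24. A quick check of that arithmetic (illustrative, run before any gradient updates; not part of the original file):

# Hypothetical check, assuming parameter_vector still holds its initial value.
assert _hypothesis_value((5, 2, 3)) == 2 + 4 * 5 + 1 * 2 + 5 * 3 == 39
assert _error(0) == 39 - 15 == 24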
78
from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[int] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: 
'''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[str]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Tuple: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[str]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[Any]: '''simple docstring''' requires_backends(__lowerCAmelCase , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : int = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple 
docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[int] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : int = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( 
metaclass=lowerCamelCase__ ): __lowerCamelCase : List[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[int] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ 
( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , 
*snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[int] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Optional[int] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = ["""torch"""] def __init__( self , 
*snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : int = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : int = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : int = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( 
metaclass=lowerCamelCase__ ): __lowerCamelCase : Any = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Dict = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : str = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : Tuple = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) class __snake_case ( metaclass=lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""torch"""] def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''torch'''] ) @classmethod def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''torch'''] )
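The classes above follow the dummy-object pattern: placeholder classes generated for every torch-backed object so that touching any of them without PyTorch installed raises an informative ImportError instead of a NameError. A minimal sketch of the same pattern; the class name is made up for illustration, and the imports here come from transformers.utils rather than the file's own relative `..utils`:

# Hypothetical example of the dummy-object pattern.
from transformers.utils import DummyObject, requires_backends

class MyTorchOnlyFeature(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Raises ImportError with install instructions when torch is missing.
        requires_backends(self, ["torch"])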
78
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
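A short instantiation sketch for the config above; the values passed are the defaults from the signature, and the snippet assumes transformers is installed (illustrative only):

# Hypothetical usage.
from transformers import RoCBertConfig

config = RoCBertConfig(shape_vocab_size=24858, pronunciation_vocab_size=910)
print(config.model_type)  # roc_bert
print(config.enable_shape, config.enable_pronunciation)  # True True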
163
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
11
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
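A short sketch showing the per-modality layer counts the config above assembles from l_layers, x_layers, and r_layers; assumes transformers is installed (illustrative only):

# Hypothetical usage.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}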
366
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is currently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
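# A minimal sketch of the rope_scaling configuration that the scaling test above
# exercises (assumes a transformers version with LlamaConfig.rope_scaling support):
#   from transformers import LlamaConfig
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# "linear" divides every position index by the factor, so even short inputs differ
# from the unscaled model; "dynamic" (NTK-style) only adjusts the RoPE base past
# max_position_embeddings, which is why short-input outputs match only in that case.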
16
0
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
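# The same deprecation-shim pattern in plain stdlib terms (illustrative, not the
# diffusers API): re-export the symbol under the old path and warn at import time.
#   import warnings
#   from new_location import Thing  # noqa: F401
#   warnings.warn("Importing Thing from old_location is deprecated; import it from "
#                 "new_location instead.", FutureWarning, stacklevel=3)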
97
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable __A : List[Any] = { 'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'], 'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXJapaneseForCausalLM', 'GPTNeoXJapaneseLayer', 'GPTNeoXJapaneseModel', 'GPTNeoXJapanesePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
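# A minimal stdlib-only sketch of the lazy-import pattern used above (illustrative;
# the real transformers._LazyModule carries extra machinery such as module specs
# and __dir__ support):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that actually defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called for attributes not found normally, i.e. not yet imported
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(submodule, attr)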
154
0
'''simple docstring''' import os from datetime import datetime as dt from github import Github LABELS_TO_EXEMPT = [ '''good first issue''', '''feature request''', '''wip''', ] def main() -> None: g = Github(os.environ['GITHUB_TOKEN'] ) repo = g.get_repo('huggingface/accelerate' ) open_issues = repo.get_issues(state='open' ) for issue in open_issues: comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True ) last_comment = comments[0] if len(comments ) > 0 else None current_time = dt.utcnow() days_since_updated = (current_time - issue.updated_at).days days_since_creation = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='closed' ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
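# A hedged refactor idea: the staleness rule above as a pure function, so it can
# be unit-tested without a GitHub token (function and argument names are illustrative):
#   def should_close(last_commenter, days_since_updated, days_since_creation, exempt):
#       return (last_commenter == "github-actions[bot]"
#               and days_since_updated > 7
#               and days_since_creation >= 30
#               and not exempt)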
359
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', 
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( 
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', 
default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Path to the original CvT checkpoint file.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
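# The converter above boils down to applying a list of (hf_key, original_key)
# pairs to the source state dict; a minimal sketch of that core step
# (helper name is illustrative):
#   from collections import OrderedDict
#   def remap_state_dict(original_weights, rename_pairs):
#       return OrderedDict((hf_key, original_weights[orig_key]) for hf_key, orig_key in rename_pairs)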
0
0
import torch from diffusers import DiffusionPipeline class lowercase_ ( DiffusionPipeline ): '''simple docstring''' def __init__( self , unet , scheduler ) -> None: """simple docstring""" super().__init__() self.register_modules(unet=unet , scheduler=scheduler ) def __call__( self ): """simple docstring""" sample = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) timestep = 1 model_output = self.unet(sample , timestep ).sample scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output ) return result
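# A hedged usage sketch (model and scheduler choices are illustrative):
#   from diffusers import DDPMScheduler, UNet2DModel
#   pipe = lowercase_(unet=UNet2DModel(sample_size=32, in_channels=3, out_channels=3),
#                     scheduler=DDPMScheduler())
#   out = pipe()  # scheduler_output - scheduler_output + 1 -> a tensor of ones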
0
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class lowercase_ ( lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ProphetNetTokenizer __snake_case = False def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" super().setUp() a = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ) ->Dict: """simple docstring""" a = '''UNwant\u00E9d,running''' a = '''unwanted, running''' return input_text, output_text def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" a = self.tokenizer_class(self.vocab_file ) a = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] ) def __lowerCAmelCase ( self : int ) ->Any: """simple docstring""" a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def __lowerCAmelCase ( self : Any ) ->int: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def __lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCAmelCase ( self : Tuple ) ->Tuple: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCAmelCase ( self : Any ) ->int: """simple docstring""" a = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def __lowerCAmelCase ( self : Union[str, Any] ) ->int: """simple docstring""" a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] a = {} for i, token in enumerate(__UpperCAmelCase ): a = i a = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def __lowerCAmelCase ( self : int ) ->int: """simple docstring""" a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] a = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102] a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def __lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def __lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def __lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) a = tokenizer.encode('''sequence builders''' , 
add_special_tokens=__UpperCAmelCase ) a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase ) a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ) a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
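# A minimal sketch of the greedy longest-match-first WordPiece algorithm that the
# WordpieceTokenizer tests above exercise (simplified: no max-chars-per-word cap,
# helper name is illustrative):
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no sub-piece of the remaining characters is in the vocab
            return [unk]
        start = end
    return pieces


# _wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]
# _wordpiece_sketch("unwantedX", {"un", "##want", "##ed"}) -> ["[UNK]"]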
0
1
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class UpperCAmelCase ( BertTokenizationTest ): '''simple docstring''' tokenizer_class = DistilBertTokenizer rust_tokenizer_class = DistilBertTokenizerFast test_rust_tokenizer = True @slow def UpperCamelCase_ ( self ): tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) text = tokenizer.encode("sequence builders" ,add_special_tokens=False ) text_a = tokenizer.encode("multi-sequence build" ,add_special_tokens=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text ) encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
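# The special-token layouts asserted above, spelled out (BERT/DistilBERT convention):
#   single sequence: [CLS] A [SEP]
#   sequence pair:   [CLS] A [SEP] B [SEP]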
364
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE :int = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Dict = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :int = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[int] = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
124
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''', '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''', '''uclanlp/visualbert-vqa-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''', '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''', '''uclanlp/visualbert-vcr-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json''' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class snake_case_ ( __A ): __A : Union[str, Any] = "visual_bert" def __init__( self : Any , lowercase_ : Any=3_05_22 , lowercase_ : List[Any]=7_68 , lowercase_ : List[str]=5_12 , lowercase_ : Dict=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : List[Any]=30_72 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : int=2 , lowercase_ : List[Any]=0.02 , lowercase_ : List[Any]=1E-12 , lowercase_ : str=False , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=1 , lowercase_ : List[str]=0 , lowercase_ : str=2 , **lowercase_ : Dict , ) -> List[str]: super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = max_position_embeddings lowercase__ : List[str] = hidden_size lowercase__ : str = visual_embedding_dim lowercase__ : List[Any] = num_hidden_layers lowercase__ : str = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Any = hidden_act lowercase__ : int = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Dict = initializer_range lowercase__ : str = type_vocab_size lowercase__ : Any = layer_norm_eps lowercase__ : Tuple = bypass_transformer lowercase__ : Tuple = special_visual_initialize
87
def perfect_cube(n: int) -> bool: # round the float cube root so perfect cubes such as 27 are not rejected by floating-point error val = round(n ** (1 / 3)) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
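# Floating-point cube roots lose exactness for very large integers; a hedged,
# integer-only alternative via binary search (helper name is illustrative):
def _icbrt(n: int) -> int:
    # upper bound: cbrt(n) < 2 ** (n.bit_length() // 3 + 2)
    low, high = 0, 1 << (n.bit_length() // 3 + 2)
    while low < high:
        mid = (low + high + 1) // 2
        if mid ** 3 <= n:
            low = mid
        else:
            high = mid - 1
    return low


# _icbrt(n) ** 3 == n is an exact perfect-cube test for any non-negative integer n.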
87
1
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def _A ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: snake_case_ = FunnelConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) snake_case_ = FunnelBaseModel(lowercase__ ) if base_model else FunnelModel(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.' ) __SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
356
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> list: snake_case_ = length or len(_SCREAMING_SNAKE_CASE ) snake_case_ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: snake_case_ , snake_case_ = list_data[i + 1], list_data[i] snake_case_ = True return list_data if not swapped else bubble_sort(_SCREAMING_SNAKE_CASE , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
233
0
import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE__ : Optional[int] = 16 SCREAMING_SNAKE_CASE__ : Tuple = 32 def __magic_name__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ) -> Union[str, Any]: __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCAmelCase : Tuple ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCAmelCase : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( __lowerCAmelCase , padding='''longest''' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=(accelerator.mixed_precision == '''fp8''') , ) return train_dataloader, eval_dataloader def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> Any: # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation __lowerCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowerCamelCase = batch_size // MAX_GPU_BATCH_SIZE __lowerCamelCase = MAX_GPU_BATCH_SIZE set_seed(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() , lr=__lowerCAmelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Now we train the model for epoch in range(__lowerCAmelCase ): model.train() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowerCamelCase = model(**__lowerCAmelCase ) __lowerCamelCase = outputs.loss __lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**__lowerCAmelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase , __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , __lowerCAmelCase ) def __magic_name__ ( ) -> Any: __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
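# The gradient-accumulation arithmetic used above, in isolation: with a requested
# batch_size of 64 and MAX_GPU_BATCH_SIZE = 16, each optimizer step spans four
# micro-batches, and dividing the loss by gradient_accumulation_steps makes the
# accumulated gradients match a single 64-sample batch.
#   gradient_accumulation_steps = 64 // 16        # -> 4
#   loss = loss / gradient_accumulation_steps     # scale before backward()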
270
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=bool(model_type in ["roberta"]),
                        # roberta uses an extra separator b/w pairs of sentences
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=bool(model_type in ["roberta"]),
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
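# --- Added illustration (not part of the original file) ---
# The core alignment rule in `convert_examples_to_features` above: only the first
# sub-token of each word keeps the real label id; every continuation piece gets
# pad_token_label_id (-100, CrossEntropyLoss's ignore_index) so it is masked out of
# the loss. A pure-Python sketch with a hypothetical word-piece split:
pad_token_label_id = -100
word_pieces = [("Washington", ["Wash", "##ing", "##ton"], 3), ("went", ["went"], 0)]
label_ids = []
for _word, pieces, label_id in word_pieces:
    label_ids.extend([label_id] + [pad_token_label_id] * (len(pieces) - 1))
print(label_ids)  # [3, -100, -100, 0]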
"""Convert a fairseq SpeechT5 HiFi-GAN vocoder checkpoint to the 🤗 Transformers format."""
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Weight norm must be active so the checkpoint's weight_g / weight_v tensors have
    # somewhere to go; it is removed again once everything has been copied.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # The stats file holds the mean and scale used to de-normalize input spectrograms.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
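# --- Added illustration (not part of the original script) ---
# The conversion works because torch's weight norm factorizes weight = g * v / ||v||,
# exposing `weight_g` / `weight_v` parameters that line up with the fairseq checkpoint
# keys; applying it before the copy and removing it afterwards bakes the copied
# tensors back into a plain `weight`. A minimal sketch of that round trip:
import torch.nn as nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))
print(hasattr(conv, "weight_g"), hasattr(conv, "weight_v"))  # True True
remove_weight_norm(conv)
print(hasattr(conv, "weight_g"))  # False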
"""Sigmoid and swish (sigmoid linear unit, SiLU) activation functions with NumPy."""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Elementwise logistic function 1 / (1 + exp(-x)).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    Swish / SiLU activation: x * sigmoid(x).

    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
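# --- Added usage sketch (not part of the module above) ---
# Both activations are vectorized, so they apply elementwise to arrays:
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # approx. [0.269, 0.5, 0.731]
print(sigmoid_linear_unit(x))  # approx. [-0.269, 0.0, 0.731]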
def solution(n: int = 1_000) -> int:
    """
    Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1).
    `or` already counts multiples of 15 exactly once, so no correction is needed.

    >>> solution(10)
    23
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
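# --- Added cross-check (not part of the original solution) ---
# By inclusion-exclusion, the sum of multiples of 3 or 5 below n equals
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) / 2 with m = (n - 1) // k:
def closed_form(n: int = 1_000) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)


assert closed_form(1_000) == 233_168 == solution(1_000)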
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __magic_name__ ( lowerCAmelCase ): def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : str =self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes')) self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads')) self.parent.assertTrue(hasattr(snake_case , 'num_encoder_blocks')) class __magic_name__ : def __init__( self , snake_case , snake_case=1_3 , snake_case=6_4 , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[1_6, 3_2, 6_4, 1_2_8] , snake_case=[1, 4, 8, 1_6] , snake_case=[1, 2, 4, 8] , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=3 , snake_case=None , ) -> str: '''simple docstring''' _UpperCAmelCase : List[str] =parent _UpperCAmelCase : List[str] =batch_size _UpperCAmelCase : List[Any] =image_size _UpperCAmelCase : List[Any] =num_channels _UpperCAmelCase : Any =num_encoder_blocks _UpperCAmelCase : Optional[int] =sr_ratios _UpperCAmelCase : Optional[Any] =depths _UpperCAmelCase : List[str] =hidden_sizes _UpperCAmelCase : int =downsampling_rates _UpperCAmelCase : Dict =num_attention_heads _UpperCAmelCase : int =is_training _UpperCAmelCase : Dict =use_labels _UpperCAmelCase : List[str] =hidden_act _UpperCAmelCase : List[Any] =hidden_dropout_prob _UpperCAmelCase : str =attention_probs_dropout_prob _UpperCAmelCase : str =initializer_range _UpperCAmelCase : Optional[Any] =num_labels _UpperCAmelCase : Any =scope def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCAmelCase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCAmelCase : Any =None if self.use_labels: _UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) _UpperCAmelCase : int =self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[str] =SegformerModel(config=snake_case) model.to(snake_case) model.eval() _UpperCAmelCase : List[Any] =model(snake_case) _UpperCAmelCase : Dict =self.image_size // 
(self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> str: '''simple docstring''' _UpperCAmelCase : Dict =self.num_labels _UpperCAmelCase : str =SegformerForSemanticSegmentation(snake_case) model.to(snake_case) model.eval() _UpperCAmelCase : Optional[int] =model(snake_case) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) _UpperCAmelCase : int =model(snake_case , labels=snake_case) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) self.parent.assertGreater(result.loss , 0.0) def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[Any] =1 _UpperCAmelCase : List[str] =SegformerForSemanticSegmentation(config=snake_case) model.to(snake_case) model.eval() _UpperCAmelCase : Union[str, Any] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(snake_case) _UpperCAmelCase : Optional[int] =model(snake_case , labels=snake_case) self.parent.assertGreater(result.loss , 0.0) def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Any =self.prepare_config_and_inputs() _UpperCAmelCase : List[Any] =config_and_inputs _UpperCAmelCase : Optional[int] ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ): UpperCAmelCase =( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) UpperCAmelCase =( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase =True UpperCAmelCase =False UpperCAmelCase =False UpperCAmelCase =False def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : Tuple =SegformerModelTester(self) _UpperCAmelCase : Tuple =SegformerConfigTester(self , config_class=snake_case) def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case) def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case) def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*snake_case) @unittest.skip('SegFormer does not use inputs_embeds') def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods') def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' pass def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : List[str] =model_class(snake_case) 
_UpperCAmelCase : Dict =inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase : Optional[int] =[*signature.parameters.keys()] _UpperCAmelCase : Optional[int] =['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case) def lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Tuple =True for model_class in self.all_model_classes: _UpperCAmelCase : List[Any] =True _UpperCAmelCase : Dict =False _UpperCAmelCase : Optional[Any] =True _UpperCAmelCase : Any =model_class(snake_case) model.to(snake_case) model.eval() with torch.no_grad(): _UpperCAmelCase : List[Any] =model(**self._prepare_for_class(snake_case , snake_case)) _UpperCAmelCase : str =outputs.attentions _UpperCAmelCase : Dict =sum(self.model_tester.depths) self.assertEqual(len(snake_case) , snake_case) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase : str =True _UpperCAmelCase : str =model_class(snake_case) model.to(snake_case) model.eval() with torch.no_grad(): _UpperCAmelCase : Tuple =model(**self._prepare_for_class(snake_case , snake_case)) _UpperCAmelCase : List[str] =outputs.attentions self.assertEqual(len(snake_case) , snake_case) # verify the first attentions (first block, first layer) _UpperCAmelCase : str =(self.model_tester.image_size // 4) ** 2 _UpperCAmelCase : Optional[int] =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _UpperCAmelCase : Tuple =(self.model_tester.image_size // 3_2) ** 2 _UpperCAmelCase : Union[str, Any] =(self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _UpperCAmelCase : Any =len(snake_case) # Check attention is always last and order is fine _UpperCAmelCase : Union[str, Any] =True _UpperCAmelCase : int =True _UpperCAmelCase : List[Any] =model_class(snake_case) model.to(snake_case) model.eval() with torch.no_grad(): _UpperCAmelCase : Any =model(**self._prepare_for_class(snake_case , snake_case)) self.assertEqual(out_len + 1 , len(snake_case)) _UpperCAmelCase : Dict =outputs.attentions self.assertEqual(len(snake_case) , snake_case) # verify the first attentions (first block, first layer) _UpperCAmelCase : Optional[int] =(self.model_tester.image_size // 4) ** 2 _UpperCAmelCase : Any =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowerCAmelCase ( self) -> int: '''simple docstring''' def check_hidden_states_output(snake_case , snake_case , snake_case): _UpperCAmelCase : int =model_class(snake_case) model.to(snake_case) model.eval() with torch.no_grad(): _UpperCAmelCase : Union[str, Any] =model(**self._prepare_for_class(snake_case , snake_case)) _UpperCAmelCase : Optional[int] =outputs.hidden_states _UpperCAmelCase : Dict =self.model_tester.num_encoder_blocks self.assertEqual(len(snake_case) , snake_case) # verify the first hidden states (first block) self.assertListEqual( 
list(hidden_states[0].shape[-3:]) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Optional[int] =True check_hidden_states_output(snake_case , snake_case , snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase : Union[str, Any] =True check_hidden_states_output(snake_case , snake_case , snake_case) def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' if not self.model_tester.is_training: return _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Dict =True for model_class in self.all_model_classes: if model_class in get_values(snake_case): continue _UpperCAmelCase : List[str] =model_class(snake_case) model.to(snake_case) model.train() _UpperCAmelCase : Any =self._prepare_for_class(snake_case , snake_case , return_labels=snake_case) _UpperCAmelCase : List[str] =model(**snake_case).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' pass @slow def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : List[Any] =SegformerModel.from_pretrained(snake_case) self.assertIsNotNone(snake_case) def lowerCamelCase__ ( ): '''simple docstring''' _UpperCAmelCase : Optional[Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class __magic_name__ ( unittest.TestCase ): @slow def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Any =SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case) _UpperCAmelCase : Optional[Any] =SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( snake_case) _UpperCAmelCase : Union[str, Any] =prepare_img() _UpperCAmelCase : Optional[Any] =image_processor(images=snake_case , return_tensors='pt') _UpperCAmelCase : str =encoded_inputs.pixel_values.to(snake_case) with torch.no_grad(): _UpperCAmelCase : Any =model(snake_case) _UpperCAmelCase : Optional[int] =torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8)) self.assertEqual(outputs.logits.shape , snake_case) _UpperCAmelCase : Union[str, Any] =torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ]).to(snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1E-4)) @slow def lowerCAmelCase ( self) -> int: '''simple docstring''' _UpperCAmelCase : List[Any] =SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case) _UpperCAmelCase : str =SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(snake_case) _UpperCAmelCase : Tuple =prepare_img() _UpperCAmelCase : Optional[Any] =image_processor(images=snake_case , return_tensors='pt') _UpperCAmelCase : str 
=encoded_inputs.pixel_values.to(snake_case) with torch.no_grad(): _UpperCAmelCase : Optional[int] =model(snake_case) _UpperCAmelCase : Optional[Any] =torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8)) self.assertEqual(outputs.logits.shape , snake_case) _UpperCAmelCase : Optional[Any] =torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ]).to(snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1E-1)) @slow def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Optional[int] =SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case) _UpperCAmelCase : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( snake_case) _UpperCAmelCase : List[str] =prepare_img() _UpperCAmelCase : str =image_processor(images=snake_case , return_tensors='pt') _UpperCAmelCase : Union[str, Any] =encoded_inputs.pixel_values.to(snake_case) with torch.no_grad(): _UpperCAmelCase : Optional[Any] =model(snake_case) _UpperCAmelCase : Any =outputs.logits.detach().cpu() _UpperCAmelCase : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(5_0_0, 3_0_0)]) _UpperCAmelCase : List[str] =torch.Size((5_0_0, 3_0_0)) self.assertEqual(segmentation[0].shape , snake_case) _UpperCAmelCase : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=snake_case) _UpperCAmelCase : int =torch.Size((1_2_8, 1_2_8)) self.assertEqual(segmentation[0].shape , snake_case)
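# --- Added illustration (not part of the test file above) ---
# The arithmetic behind the attention-shape assertions: stage 1 tokenizes a 64x64
# image at stride 4 into a (64 // 4)**2 = 256-token sequence, and a spatial-reduction
# ratio of 8 shrinks the key/value sequence to (64 // (4 * 8))**2 = 4 tokens.
image_size, stride, sr_ratio = 64, 4, 8
print((image_size // stride) ** 2, (image_size // (stride * sr_ratio)) ** 2)  # 256 4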
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __magic_name__ ( unittest.TestCase ): def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=4 , ) -> Dict: '''simple docstring''' _UpperCAmelCase : Dict =parent _UpperCAmelCase : Dict =batch_size _UpperCAmelCase : List[Any] =seq_length _UpperCAmelCase : List[str] =is_training _UpperCAmelCase : Optional[int] =use_attention_mask _UpperCAmelCase : Dict =use_token_type_ids _UpperCAmelCase : Dict =use_labels _UpperCAmelCase : Optional[Any] =vocab_size _UpperCAmelCase : str =hidden_size _UpperCAmelCase : Dict =num_hidden_layers _UpperCAmelCase : Tuple =num_attention_heads _UpperCAmelCase : List[str] =intermediate_size _UpperCAmelCase : List[str] =hidden_act _UpperCAmelCase : int =hidden_dropout_prob _UpperCAmelCase : Optional[int] =attention_probs_dropout_prob _UpperCAmelCase : Optional[Any] =max_position_embeddings _UpperCAmelCase : Union[str, Any] =type_vocab_size _UpperCAmelCase : Dict =type_sequence_label_size _UpperCAmelCase : Union[str, Any] =initializer_range _UpperCAmelCase : Any =num_choices def lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase : str =None if self.use_attention_mask: _UpperCAmelCase : Dict =random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase : Optional[Any] =None if self.use_token_type_ids: _UpperCAmelCase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Dict =self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str =config_and_inputs _UpperCAmelCase : List[Any] ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Tuple 
=self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] =config_and_inputs _UpperCAmelCase : Tuple =True _UpperCAmelCase : Any =floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ): UpperCAmelCase =True UpperCAmelCase =( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : int =FlaxRobertaPreLayerNormModelTester(self) @slow def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCAmelCase : List[str] =model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=snake_case) _UpperCAmelCase : Dict =model(np.ones((1, 1))) self.assertIsNotNone(snake_case) @require_flax class __magic_name__ ( unittest.TestCase ): @slow def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : Tuple =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=snake_case) _UpperCAmelCase : Optional[int] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) _UpperCAmelCase : str =model(snake_case)[0] _UpperCAmelCase : int =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape) , snake_case) # compare the actual values for a slice. _UpperCAmelCase : List[str] =np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=1E-4)) @slow def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : Dict =FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=snake_case) _UpperCAmelCase : List[str] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) _UpperCAmelCase : Tuple =model(snake_case)[0] # compare the actual values for a slice. _UpperCAmelCase : List[str] =np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=1E-4))
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() _UpperCamelCase : Tuple = logging.get_logger(__name__) _UpperCamelCase : Any = "Hello world! cécé herlolip" def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : Optional[Any] = FairseqRobertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout lowercase__ : Union[str, Any] = roberta.model.encoder.sentence_encoder lowercase__ : Optional[int] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: lowercase__ : List[str] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , __SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = XLMRobertaXLForSequenceClassification(__SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(__SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings lowercase__ : List[str] = roberta_sent_encoder.embed_tokens.weight lowercase__ : List[Any] = roberta_sent_encoder.embed_positions.weight lowercase__ : List[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
lowercase__ : List[str] = roberta_sent_encoder.layer_norm.weight lowercase__ : Optional[Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowercase__ : List[str] = model.roberta.encoder.layer[i] lowercase__ : str = roberta_sent_encoder.layers[i] lowercase__ : str = layer.attention lowercase__ : Dict = roberta_layer.self_attn_layer_norm.weight lowercase__ : Dict = roberta_layer.self_attn_layer_norm.bias # self attention lowercase__ : List[str] = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) lowercase__ : int = roberta_layer.self_attn.q_proj.weight lowercase__ : str = roberta_layer.self_attn.q_proj.bias lowercase__ : Union[str, Any] = roberta_layer.self_attn.k_proj.weight lowercase__ : Union[str, Any] = roberta_layer.self_attn.k_proj.bias lowercase__ : int = roberta_layer.self_attn.v_proj.weight lowercase__ : Any = roberta_layer.self_attn.v_proj.bias # self-attention output lowercase__ : List[str] = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape lowercase__ : Tuple = roberta_layer.self_attn.out_proj.weight lowercase__ : Optional[int] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm lowercase__ : Optional[Any] = roberta_layer.final_layer_norm.weight lowercase__ : List[str] = roberta_layer.final_layer_norm.bias # intermediate lowercase__ : Optional[Any] = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape lowercase__ : List[str] = roberta_layer.fca.weight lowercase__ : str = roberta_layer.fca.bias # output lowercase__ : int = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape lowercase__ : Dict = roberta_layer.fca.weight lowercase__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: lowercase__ : Union[str, Any] = roberta.model.classification_heads['mnli'].dense.weight lowercase__ : Optional[int] = roberta.model.classification_heads['mnli'].dense.bias lowercase__ : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight lowercase__ : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head lowercase__ : Tuple = roberta.model.encoder.lm_head.dense.weight lowercase__ : Optional[int] = roberta.model.encoder.lm_head.dense.bias lowercase__ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight lowercase__ : int = roberta.model.encoder.lm_head.layer_norm.bias lowercase__ : List[str] = roberta.model.encoder.lm_head.weight lowercase__ : Tuple = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowercase__ : Tuple = roberta.encode(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 lowercase__ : Tuple = model(__SCREAMING_SNAKE_CASE )[0] if classification_head: lowercase__ : List[str] = roberta.model.classification_heads['mnli'](roberta.extract_features(__SCREAMING_SNAKE_CASE ) ) else: lowercase__ : Tuple = roberta.model(__SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) lowercase__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 lowercase__ : Optional[Any] = torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(__SCREAMING_SNAKE_CASE ).mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) _UpperCamelCase : int = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
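# --- Added illustration (not part of the conversion script above) ---
# The pattern behind the final check: compare original and ported outputs elementwise
# and fail loudly past a tolerance (tensors below are toy placeholders):
import torch

ours = torch.tensor([1.0, 2.0])
theirs = ours + 1e-5
max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
assert torch.allclose(ours, theirs, atol=1e-3), f"max_absolute_diff = {max_absolute_diff}"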
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A_ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ): return None class A_ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ): return None class A_ ( unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Tuple = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case , 'tf' , 12 , **snake_case ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case , 'pt' , 12 , **snake_case ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import BertModel lowercase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(snake_case ) ) vocab_file.flush() lowercase = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase = BertModel(BertConfig(vocab_size=len(snake_case ) ) ) model.save_pretrained(snake_case ) self._test_export(snake_case , 'pt' , 12 , snake_case ) @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase = self._test_export(snake_case , 'tf' , 12 , **snake_case ) lowercase = quantize(Path(snake_case ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase = self._test_export(snake_case , 'pt' , 12 , **snake_case ) lowercase = quantize(snake_case ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ): try: # Compute path with TemporaryDirectory() as tempdir: lowercase = Path(snake_case ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case ) return path except Exception as e: self.fail(snake_case ) @require_torch @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import BertModel lowercase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case , snake_case , 'pt' ) @require_tf @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import TFBertModel lowercase 
= TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case , snake_case , 'tf' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ): lowercase = FeatureExtractionPipeline(snake_case , snake_case ) lowercase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase , lowercase , lowercase , lowercase = infer_shapes(snake_case , snake_case ) # Assert all variables are present self.assertEqual(len(snake_case ) , len(snake_case ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , snake_case ) self.assertSequenceEqual(variable_names[3:] , snake_case ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] , {0: 'batch'} ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase , lowercase = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(snake_case ) , 3 ) # Should have exactly the same input names self.assertEqual(set(snake_case ) , set(snake_case ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase , lowercase = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(snake_case ) , 1 ) self.assertEqual(len(snake_case ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] , 'input_ids' ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
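# --- Added illustration (not part of the test file above) ---
# The convention `infer_shapes` is asserted to produce: every tokenizer input gets
# axis 0 = batch and axis 1 = sequence, so the exported ONNX graph accepts arbitrary
# batch sizes and sequence lengths:
dynamic_axes = {name: {0: "batch", 1: "sequence"} for name in ["input_ids", "token_type_ids", "attention_mask"]}
print(dynamic_axes["input_ids"])  # {0: 'batch', 1: 'sequence'}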
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters UpperCamelCase_ = logging.get_logger(__name__) def lowerCamelCase_ ( _a : int , _a : Union[str, Any] , _a : Any , _a : Tuple=None , _a : Dict=None ): '''simple docstring''' if "." in tensor_name: UpperCAmelCase_ : int = tensor_name.split(""".""" ) for split in splits[:-1]: UpperCAmelCase_ : str = getattr(__lowerCAmelCase , __lowerCAmelCase ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) UpperCAmelCase_ : Dict = new_module UpperCAmelCase_ : Dict = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) UpperCAmelCase_ : Optional[Any] = tensor_name in module._buffers UpperCAmelCase_ : Optional[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Dict = False if is_buffer or not is_bitsandbytes_available(): UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : List[Any] = False else: UpperCAmelCase_ : Union[str, Any] = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCAmelCase_ : List[str] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCAmelCase_ : List[str] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCAmelCase_ : int = old_value.to(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , torch.Tensor ): UpperCAmelCase_ : Optional[Any] = value.to("""cpu""" ) if value.dtype == torch.inta: UpperCAmelCase_ : int = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: UpperCAmelCase_ : Any = torch.tensor(__lowerCAmelCase , device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , __lowerCAmelCase ) and fpaa_statistics is None: UpperCAmelCase_ : Tuple = new_value.T UpperCAmelCase_ : str = old_value.__dict__ if is_abit: UpperCAmelCase_ : List[Any] = bnb.nn.IntaParams(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase ) elif is_abit: UpperCAmelCase_ : List[Any] = bnb.nn.Paramsabit(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase ) UpperCAmelCase_ : str = new_value if fpaa_statistics is not None: setattr(module.weight , """SCB""" , fpaa_statistics.to(__lowerCAmelCase ) ) else: if value is None: UpperCAmelCase_ : List[Any] = old_value.to(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , torch.Tensor ): UpperCAmelCase_ : Optional[int] = value.to(__lowerCAmelCase ) else: UpperCAmelCase_ : Union[str, Any] = torch.tensor(__lowerCAmelCase , device=__lowerCAmelCase ) if is_buffer: UpperCAmelCase_ : List[str] = new_value else: UpperCAmelCase_ : Tuple = nn.Parameter(__lowerCAmelCase , requires_grad=old_value.requires_grad ) UpperCAmelCase_ : Tuple = new_value def lowerCamelCase_ ( _a : List[str] , _a : Optional[int]=None , _a : List[str]=None , _a : Union[str, Any]=None , _a : str=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: UpperCAmelCase_ : Dict = [] current_key_name.append(__lowerCAmelCase ) if (isinstance(__lowerCAmelCase , nn.Linear ) or isinstance(__lowerCAmelCase , __lowerCAmelCase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(__lowerCAmelCase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = module.weight.shape else: UpperCAmelCase_ : int = module.in_features UpperCAmelCase_ : Union[str, Any] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCAmelCase_ : List[Any] = bnb.nn.LinearabitLt( __lowerCAmelCase , __lowerCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCAmelCase_ : str = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCAmelCase_ : Any = bnb.nn.Linearabit( __lowerCAmelCase , __lowerCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCAmelCase_ : List[Any] = True # Store the module class in case we need to transpose the weight later UpperCAmelCase_ : Optional[Any] = type(__lowerCAmelCase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(__lowerCAmelCase ) if len(list(module.children() ) ) > 0: UpperCAmelCase_ , UpperCAmelCase_ : int = _replace_with_bnb_linear( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_been_replaced=__lowerCAmelCase , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCamelCase_ ( _a : Union[str, Any] , _a : str=None , _a : Optional[Any]=None , _a : str=None ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert UpperCAmelCase_ , UpperCAmelCase_ : 
int = _replace_with_bnb_linear( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowerCamelCase_ ( *_a : Any , **_a : Any ): '''simple docstring''' warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , __lowerCAmelCase , ) return replace_with_bnb_linear(*__lowerCAmelCase , **__lowerCAmelCase ) def lowerCamelCase_ ( *_a : int , **_a : Any ): '''simple docstring''' warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , __lowerCAmelCase , ) return set_module_quantized_tensor_to_device(*__lowerCAmelCase , **__lowerCAmelCase ) def lowerCamelCase_ ( _a : int ): '''simple docstring''' UpperCAmelCase_ : int = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCAmelCase_ : Optional[int] = find_tied_parameters(__lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCAmelCase_ : Dict = sum(__lowerCAmelCase , [] ) UpperCAmelCase_ : List[str] = len(__lowerCAmelCase ) > 0 # Check if it is a base model UpperCAmelCase_ : Tuple = not hasattr(__lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCAmelCase_ : Optional[Any] = list(model.named_children() ) UpperCAmelCase_ : List[str] = [list_modules[-1][0]] # add last module together with tied weights UpperCAmelCase_ : Dict = set(__lowerCAmelCase ) - set(__lowerCAmelCase ) UpperCAmelCase_ : List[str] = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase ) # remove ".weight" from the keys UpperCAmelCase_ : Optional[Any] = [""".weight""", """.bias"""] UpperCAmelCase_ : Dict = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCAmelCase_ : Optional[int] = name.replace(__lowerCAmelCase , """""" ) filtered_module_names.append(__lowerCAmelCase ) return filtered_module_names
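# --- Added usage sketch (not part of the module above) ---
# These helpers are what drive 8-bit loading in practice; a typical entry point
# (requires a CUDA device plus the `bitsandbytes` package, and the model id is just
# an example) would be:
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("gpt2", load_in_8bit=True, device_map="auto")
#
# which swaps every eligible nn.Linear for bnb.nn.Linear8bitLt via the
# replace-with-bnb-linear pass before the quantized weights are set.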
353
def lowerCamelCase_ ( _a : int ):
    '''simple docstring'''
    if not isinstance(_a , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    if _a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return bin(_a ).count("""1""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
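The `_replace_with_bnb_linear` routine in the record above follows a standard recursive module-swap pattern. Below is a minimal sketch of that pattern with illustrative names; the generic `factory` argument stands in for the bitsandbytes layer constructors used in the original, and `modules_to_not_convert` mirrors the `lm_head` default.

import torch.nn as nn


def replace_linear(model: nn.Module, factory, modules_to_not_convert=("lm_head",)):
    # Walk the module tree; swap every nn.Linear whose name is not excluded.
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in modules_to_not_convert:
            new_layer = factory(child.in_features, child.out_features, child.bias is not None)
            new_layer.requires_grad_(False)  # quantized weights are not trained
            model._modules[name] = new_layer
        elif len(list(child.children())) > 0:
            replace_linear(child, factory, modules_to_not_convert)  # recurse into submodules
    return model

As a smoke test, `replace_linear(model, lambda i, o, b: nn.Linear(i, o, bias=b))` rebuilds the same architecture; in the script above the factory is the quantized bitsandbytes layer instead.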
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = '▁' UpperCAmelCase_ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCAmelCase_ : List[str] = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } UpperCAmelCase_ : int = { 'xlm-roberta-base': 512, 'xlm-roberta-large': 512, 'xlm-roberta-large-finetuned-conll02-dutch': 512, 'xlm-roberta-large-finetuned-conll02-spanish': 512, 'xlm-roberta-large-finetuned-conll03-english': 512, 'xlm-roberta-large-finetuned-conll03-german': 512, } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = VOCAB_FILES_NAMES snake_case__ : Dict = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : str="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Any , ) -> None: a_ : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) a_ : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token a_ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a_ : List[str] = 1 a_ : Any = len(self.sp_model ) + self.fairseq_offset a_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : List[str] ) -> Optional[int]: a_ : Any = self.__dict__.copy() a_ : int = None a_ : List[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]: a_ : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): a_ : Tuple = {} a_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a_ : Optional[Any] = [self.cls_token_id] a_ : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: a_ : int = [self.sep_token_id] a_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: a_ : Tuple = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a_ : Tuple = self.sp_model.PieceToId(__UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]: a_ : str = ''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ' ' ).strip() return out_string def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : str , 
SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a_ : Any = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , 'wb' ) as fi: a_ : Tuple = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
32
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def _a ( a :List[Any] ) -> Optional[int]: a = [] embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", F"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", F"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", F"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", F"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def _a ( a :List[Any] , a :Optional[int] ) -> Dict: a = [] attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( 
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) 
attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def _a ( a :Any ) -> List[Any]: a = [] token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') ) return token def _a ( ) -> Optional[int]: a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def _a ( a :Tuple , a :Optional[int] , a :List[Any] , a :Union[str, Any] ) -> Optional[int]: a = '''imagenet-1k-id2label.json''' a = 1_000 a = '''huggingface/label-files''' a = num_labels a = json.load(open(cached_download(hf_hub_url(a , a , repo_type='''dataset''' ) ) , '''r''' ) ) a = {int(a ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} a = a = CvtConfig(num_labels=a , idalabel=a , labelaid=a ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: a = [2, 2, 20] a = [3, 12, 16] a = [192, 768, 1_024] a = CvtForImageClassification(a ) a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) a = image_size a = torch.load(a , map_location=torch.device('''cpu''' ) ) a = OrderedDict() a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: a = list_of_state_dict + cls_token(a ) a = list_of_state_dict + embeddings(a ) for cnt in range(config.depth[idx] ): a = list_of_state_dict + attention(a , a ) a = list_of_state_dict + final() for gg in list_of_state_dict: print(a ) for i in range(len(a ) ): a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=384, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) UpperCAmelCase__ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
0
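The vocab-alignment comment inside the tokenizer record above boils down to one small lookup rule. A sketch of that rule with readable names (hypothetical function; `sp_model` is any loaded SentencePiece processor):

FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # every spm id is shifted by one to make room for "<pad>"


def token_to_id(token: str, sp_model, unk_token_id: int = 3) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_model.PieceToId(token)
    # SentencePiece returns 0 for unknown pieces; map that to the fairseq <unk> id
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_token_id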
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=__snake_case , default=__snake_case , required=__snake_case , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=__snake_case , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=__snake_case , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=__snake_case , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=__snake_case , default=0 , help='cuda_id.' , ) UpperCAmelCase_ : List[str] = parser.parse_args() return args def lowercase__ ( __snake_case : Optional[Any] , __snake_case : int , __snake_case : Any ): '''simple docstring''' if not len(__snake_case ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) UpperCAmelCase_ , UpperCAmelCase_ : str = imgs[0].size UpperCAmelCase_ : List[str] = Image.new('RGB' , size=(cols * w, rows * h) ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = grid.size for i, img in enumerate(__snake_case ): grid.paste(__snake_case , box=(i % cols * w, i // cols * h) ) return grid def lowercase__ ( __snake_case : List[str] , __snake_case : List[str]="robotic cat with wings" , __snake_case : Tuple=7.5 , __snake_case : List[Any]=50 , __snake_case : str=1 , __snake_case : Tuple=42 , ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = torch.Generator(pipeline.device ).manual_seed(__snake_case ) UpperCAmelCase_ : Union[str, Any] = pipeline( __snake_case , guidance_scale=__snake_case , num_inference_steps=__snake_case , generator=__snake_case , num_images_per_prompt=__snake_case , ).images UpperCAmelCase_ : Optional[int] = int(math.sqrt(__snake_case ) ) UpperCAmelCase_ : List[str] = image_grid(__snake_case , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCAmelCase = parse_args() # Load models and create wrapper for stable diffusion __UpperCAmelCase = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __UpperCAmelCase = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __UpperCAmelCase = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __UpperCAmelCase = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __UpperCAmelCase = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCAmelCase = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __UpperCAmelCase = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __UpperCAmelCase = unet.to(torch.device('cuda', args.cuda_id)) __UpperCAmelCase = pipeline.to(unet.device) __UpperCAmelCase , __UpperCAmelCase = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) 
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __UpperCAmelCase = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
145
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __UpperCAmelCase = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' __UpperCAmelCase = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' __UpperCAmelCase = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def lowercase__ ( __snake_case : Optional[int] ): '''simple docstring''' def remove_articles(__snake_case : Tuple ): UpperCAmelCase_ : Optional[int] = re.compile(R'\b(a|an|the)\b' , re.UNICODE ) return re.sub(__snake_case , ' ' , __snake_case ) def white_space_fix(__snake_case : int ): return " ".join(text.split() ) def remove_punc(__snake_case : int ): UpperCAmelCase_ : Optional[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__snake_case : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) ) def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ): '''simple docstring''' return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Tuple = [any(compute_exact(__snake_case , __snake_case ) for ref in refs ) for pred, refs in zip(__snake_case , __snake_case )] return (sum(__snake_case ) / len(__snake_case )) * 100 def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : str = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCAmelCase_ : str = Counter(__snake_case ) UpperCAmelCase_ : List[Any] = 
Counter(__snake_case ) UpperCAmelCase_ : int = Counter() for sgram, scount in sgramcounter.items(): UpperCAmelCase_ : Any = scount * numref UpperCAmelCase_ : List[Any] = Counter(__snake_case ) UpperCAmelCase_ : Dict = Counter() for cgram, ccount in cgramcounter.items(): UpperCAmelCase_ : int = ccount * numref # KEEP UpperCAmelCase_ : Optional[Any] = sgramcounter_rep & cgramcounter_rep UpperCAmelCase_ : Any = keepgramcounter_rep & rgramcounter UpperCAmelCase_ : Union[str, Any] = sgramcounter_rep & rgramcounter UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : Optional[Any] = 1 if len(__snake_case ) > 0: UpperCAmelCase_ : List[str] = keeptmpscorea / len(__snake_case ) if len(__snake_case ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCAmelCase_ : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCAmelCase_ : List[Any] = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCAmelCase_ : List[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCAmelCase_ : Optional[int] = sgramcounter_rep - cgramcounter_rep UpperCAmelCase_ : Dict = delgramcounter_rep - rgramcounter UpperCAmelCase_ : Optional[Any] = sgramcounter_rep - rgramcounter UpperCAmelCase_ : str = 0 UpperCAmelCase_ : str = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCAmelCase_ : List[Any] = 1 if len(__snake_case ) > 0: UpperCAmelCase_ : Dict = deltmpscorea / len(__snake_case ) # ADDITION UpperCAmelCase_ : Tuple = set(__snake_case ) - set(__snake_case ) UpperCAmelCase_ : Union[str, Any] = set(__snake_case ) & set(__snake_case ) UpperCAmelCase_ : Dict = set(__snake_case ) - set(__snake_case ) UpperCAmelCase_ : List[str] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Any = 1 if len(__snake_case ) > 0: UpperCAmelCase_ : Dict = addtmpscore / len(__snake_case ) if len(__snake_case ) > 0: UpperCAmelCase_ : Optional[int] = addtmpscore / len(__snake_case ) UpperCAmelCase_ : Optional[Any] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCAmelCase_ : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : int = len(__snake_case ) UpperCAmelCase_ : List[str] = ssent.split(' ' ) UpperCAmelCase_ : Union[str, Any] = csent.split(' ' ) UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : int = [] UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Tuple = [] for rsent in rsents: UpperCAmelCase_ : List[Any] = rsent.split(' ' ) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : str = [] ragramslist.append(__snake_case ) for i in range(0 , len(__snake_case ) - 1 ): if i < len(__snake_case ) - 1: UpperCAmelCase_ : Tuple = ragrams[i] + ' ' + ragrams[i + 1] ragrams.append(__snake_case ) if i < len(__snake_case ) - 2: UpperCAmelCase_ : List[str] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] ragrams.append(__snake_case ) if i < len(__snake_case ) - 3: UpperCAmelCase_ : Union[str, Any] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3] ragrams.append(__snake_case ) ragramslist.append(__snake_case ) ragramslist.append(__snake_case ) ragramslist.append(__snake_case ) for i in range(0 , len(__snake_case ) - 1 ): if i < len(__snake_case ) - 1: UpperCAmelCase_ : str = sagrams[i] + ' ' + sagrams[i + 1] sagrams.append(__snake_case ) if i < len(__snake_case ) - 2: UpperCAmelCase_ : List[str] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] sagrams.append(__snake_case ) if i < len(__snake_case ) - 3: UpperCAmelCase_ : Any = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3] sagrams.append(__snake_case ) for i in range(0 , len(__snake_case ) - 1 ): if i < len(__snake_case ) - 1: UpperCAmelCase_ : Optional[int] = cagrams[i] + ' ' + cagrams[i + 1] cagrams.append(__snake_case ) if i < len(__snake_case ) - 2: UpperCAmelCase_ : Tuple = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] cagrams.append(__snake_case ) if i < len(__snake_case ) - 3: UpperCAmelCase_ : Union[str, Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3] cagrams.append(__snake_case ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : int = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : str = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : int = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase_ : List[str] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCAmelCase_ : Optional[Any] = sum([delascore, delascore, delascore, delascore] ) / 4 
UpperCAmelCase_ : List[str] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCAmelCase_ : Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def lowercase__ ( __snake_case : List[Any] , __snake_case : bool = True , __snake_case : str = "13a" , __snake_case : bool = True ): '''simple docstring''' if lowercase: UpperCAmelCase_ : Optional[Any] = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCAmelCase_ : Union[str, Any] = sacrebleu.metrics.bleu._get_tokenizer(__snake_case )()(__snake_case ) else: UpperCAmelCase_ : Union[str, Any] = sacrebleu.TOKENIZERS[tokenizer]()(__snake_case ) elif tokenizer == "moses": UpperCAmelCase_ : Optional[Any] = sacremoses.MosesTokenizer().tokenize(__snake_case , return_str=__snake_case , escape=__snake_case ) elif tokenizer == "penn": UpperCAmelCase_ : Dict = sacremoses.MosesTokenizer().penn_tokenize(__snake_case , return_str=__snake_case ) else: UpperCAmelCase_ : int = sentence if not return_str: UpperCAmelCase_ : Any = normalized_sent.split() return normalized_sent def lowercase__ ( __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Dict ): '''simple docstring''' if not (len(__snake_case ) == len(__snake_case ) == len(__snake_case )): raise ValueError('Sources length must match predictions and references lengths.' ) UpperCAmelCase_ : Tuple = 0 for src, pred, refs in zip(__snake_case , __snake_case , __snake_case ): sari_score += SARIsent(normalize(__snake_case ) , normalize(__snake_case ) , [normalize(__snake_case ) for sent in refs] ) UpperCAmelCase_ : Any = sari_score / len(__snake_case ) return 100 * sari_score def lowercase__ ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : str="exp" , __snake_case : Any=None , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=False , __snake_case : List[str]=False , ): '''simple docstring''' UpperCAmelCase_ : int = len(references[0] ) if any(len(__snake_case ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) UpperCAmelCase_ : Dict = [[refs[i] for refs in references] for i in range(__snake_case )] UpperCAmelCase_ : str = sacrebleu.corpus_bleu( __snake_case , __snake_case , smooth_method=__snake_case , smooth_value=__snake_case , force=__snake_case , lowercase=__snake_case , use_effective_order=__snake_case , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase (datasets.Metric ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=[ 'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py', 'https://github.com/cocoxu/simplification/blob/master/SARI.py', 'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py', 'https://github.com/mjpost/sacreBLEU', ] , reference_urls=[ 'https://www.aclweb.org/anthology/Q16-1029.pdf', 'https://github.com/mjpost/sacreBLEU', 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def 
__UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str: UpperCAmelCase_ : List[Any] = {} result.update({'sari': compute_sari(sources=_UpperCamelCase , predictions=_UpperCamelCase , references=_UpperCamelCase )} ) result.update({'sacrebleu': compute_sacrebleu(predictions=_UpperCamelCase , references=_UpperCamelCase )} ) result.update({'exact': compute_em(predictions=_UpperCamelCase , references=_UpperCamelCase )} ) return result
145
1
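An equivalent rendering of the `image_grid` helper from the generation script above, with the tuple unpacking written out under descriptive names:

from PIL import Image


def image_grid(imgs: list, rows: int, cols: int) -> Image.Image:
    if len(imgs) != rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # fill left-to-right, top-to-bottom
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid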
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: """simple docstring""" assert x is not None assert y is not None lowerCAmelCase_ : Optional[Any] = len(_lowercase ) lowerCAmelCase_ : List[Any] = len(_lowercase ) # declaring the array for storing the dp values lowerCAmelCase_ : int = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): lowerCAmelCase_ : List[Any] = 1 if x[i - 1] == y[j - 1] else 0 lowerCAmelCase_ : Any = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) lowerCAmelCase_ : List[Any] = "" lowerCAmelCase_ : Dict = m, n while i > 0 and j > 0: lowerCAmelCase_ : List[str] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: lowerCAmelCase_ : int = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": lowercase__ = '''AGGTAB''' lowercase__ = '''GXTXAYB''' lowercase__ = 4 lowercase__ = '''GTAB''' lowercase__ = longest_common_subsequence(a, b) print("""len =""", ln, """, sub-sequence =""", subseq) import doctest doctest.testmod()
241
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return price * (1 + tax_rate) if __name__ == "__main__": print(F'''{price_plus_tax(100, 0.25) = }''') print(F'''{price_plus_tax(125.50, 0.05) = }''')
105
0
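A quick usage check for the two helpers above, using the sample values the scripts themselves declare:

ln, subseq = longest_common_subsequence("AGGTAB", "GXTXAYB")
assert (ln, subseq) == (4, "GTAB")
assert price_plus_tax(100, 0.25) == 125.0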
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase_ ( __snake_case ): _lowerCamelCase = 'beit' def __init__( self , lowercase_=8_192 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=224 , lowercase_=16 , lowercase_=3 , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=0.1 , lowercase_=0.1 , lowercase_=True , lowercase_=[3, 5, 7, 11] , lowercase_=[1, 2, 3, 6] , lowercase_=True , lowercase_=0.4 , lowercase_=256 , lowercase_=1 , lowercase_=False , lowercase_=255 , **lowercase_ , ): super().__init__(**lowercase_ ) _snake_case : Union[str, Any] = vocab_size _snake_case : int = hidden_size _snake_case : List[str] = num_hidden_layers _snake_case : str = num_attention_heads _snake_case : List[str] = intermediate_size _snake_case : List[Any] = hidden_act _snake_case : Optional[Any] = hidden_dropout_prob _snake_case : Optional[Any] = attention_probs_dropout_prob _snake_case : int = initializer_range _snake_case : List[str] = layer_norm_eps _snake_case : Dict = image_size _snake_case : Any = patch_size _snake_case : Optional[int] = num_channels _snake_case : Optional[Any] = use_mask_token _snake_case : Tuple = use_absolute_position_embeddings _snake_case : Optional[int] = use_relative_position_bias _snake_case : Optional[int] = use_shared_relative_position_bias _snake_case : List[str] = layer_scale_init_value _snake_case : Union[str, Any] = drop_path_rate _snake_case : int = use_mean_pooling # decode head attributes (semantic segmentation) _snake_case : List[str] = out_indices _snake_case : int = pool_scales # auxiliary head attributes (semantic segmentation) _snake_case : List[str] = use_auxiliary_head _snake_case : Any = auxiliary_loss_weight _snake_case : Optional[Any] = auxiliary_channels _snake_case : Any = auxiliary_num_convs _snake_case : Dict = auxiliary_concat_input _snake_case : Dict = semantic_loss_ignore_index class lowercase_ ( __snake_case ): _lowerCamelCase = version.parse('1.11' ) @property def UpperCamelCase ( self ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase ( self ): return 1e-4
358
def prime_sieve_eratosthenes(num ) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(user_num))
284
0
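Two doctest-style checks for the sieve above (added here for illustration; not part of the original row):

assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(2) == [2]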
'''simple docstring''' import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowerCamelCase : List[Any] = logging.getLogger() def _lowerCAmelCase ( _UpperCamelCase : Path , _UpperCamelCase : list ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE ='\n'.join(_UpperCamelCase ) Path(_UpperCamelCase ).open('w' ).writelines(_UpperCamelCase ) lowerCamelCase : Tuple = "patrickvonplaten/t5-tiny-random" lowerCamelCase : Tuple = "sshleifer/bart-tiny-random" lowerCamelCase : List[Any] = "sshleifer/tiny-mbart" lowerCamelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class A__ ( A__ ): def A ( self : Any , _a : Dict ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _SCREAMING_SNAKE_CASE =input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _SCREAMING_SNAKE_CASE =[' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(_a , _a ) _SCREAMING_SNAKE_CASE =str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _SCREAMING_SNAKE_CASE ='translation_en_to_de' if model == T5_TINY else 'summarization' _SCREAMING_SNAKE_CASE =f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split() with patch.object(_a , 'argv' , _a ): run_generate() assert Path(_a ).exists() # os.remove(Path(output_file_name)) def A ( self : List[str] ) -> str: '''simple docstring''' self.run_eval_tester(_a ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def A ( self : Optional[Any] , _a : Tuple ) -> List[str]: '''simple docstring''' self.run_eval_tester(_a ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def A ( self : Dict , _a : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _SCREAMING_SNAKE_CASE =input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _SCREAMING_SNAKE_CASE ={ 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) _SCREAMING_SNAKE_CASE =str(tmp_dir / 'scores.json' ) _SCREAMING_SNAKE_CASE =str(tmp_dir / 'val.target' ) _dump_articles(_a , text['en'] ) _dump_articles(_a , text['de'] ) _SCREAMING_SNAKE_CASE ='translation_en_to_de' if model == T5_TINY else 'summarization' _SCREAMING_SNAKE_CASE =f"\n run_eval_search.py\n {model}\n {str(_a )}\n {str(_a )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(_a , 'argv' , _a ): with CaptureStdout() as cs: run_search() _SCREAMING_SNAKE_CASE =[' num_beams | length_penalty', model, 'Best score args'] _SCREAMING_SNAKE_CASE =['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(_a ) for w in 
expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(_a ).exists() os.remove(Path(_a ) )
47
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase = mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: lowercase = max( mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , j - wt[i - 1] ) + val[i - 1] , ) lowercase = val return f[i][j] def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase = dp[i - 1][w_] return dp[n][w_], dp def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if not (isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) lowercase = len(__SCREAMING_SNAKE_CASE ) if num_items != len(__SCREAMING_SNAKE_CASE ): lowercase = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(__SCREAMING_SNAKE_CASE )} values''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) for i in range(__SCREAMING_SNAKE_CASE ): if not isinstance(wt[i] , __SCREAMING_SNAKE_CASE ): lowercase = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(__SCREAMING_SNAKE_CASE ) lowercase , lowercase = knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase = set() _construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return optimal_val, example_optional_set def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). 
# where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: optimal_set.add(__SCREAMING_SNAKE_CASE ) _construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i - 1 , j - wt[i - 1] , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase = [3, 2, 4, 4] UpperCAmelCase = [4, 3, 2, 3] UpperCAmelCase = 4 UpperCAmelCase = 6 UpperCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] UpperCAmelCase , UpperCAmelCase = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 UpperCAmelCase , UpperCAmelCase = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('''optimal_value = ''', optimal_solution) print('''An optimal subset corresponding to the optimal value''', optimal_subset)
195
0
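The knapsack helpers in the record above implement the standard 0/1 dynamic program. A compact readable sketch of the core recurrence, leaving out the reconstruction of the chosen item set:

def knapsack(capacity: int, weights: list[int], values: list[int]) -> int:
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            if weights[i - 1] <= w:
                # take item i-1 or leave it, whichever is worth more
                dp[i][w] = max(values[i - 1] + dp[i - 1][w - weights[i - 1]], dp[i - 1][w])
            else:
                dp[i][w] = dp[i - 1][w]
    return dp[n][capacity]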
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class __lowercase ( A ): '''simple docstring''' _A : List[Any] = (DPMSolverSDEScheduler,) _A : str = 10 def A_ ( self : Any , **_a : Dict ): UpperCamelCase__ = { '''num_train_timesteps''': 1_100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_a ) return config def A_ ( self : Optional[Any] ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=_a ) def A_ ( self : List[str] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def A_ ( self : Optional[int] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def A_ ( self : Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def A_ ( self : List[str] ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ = scheduler.scale_model_input(_a , _a ) UpperCamelCase__ = model(_a , _a ) UpperCamelCase__ = scheduler.step(_a , _a , _a ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(_a ) ) UpperCamelCase__ = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3 def A_ ( self : Optional[Any] ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCamelCase__ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ = scheduler.scale_model_input(_a , _a ) UpperCamelCase__ = model(_a , _a ) UpperCamelCase__ = scheduler.step(_a , _a , _a ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(_a ) ) UpperCamelCase__ = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3 def A_ ( self : Dict ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**_a ) 
scheduler.set_timesteps(self.num_inference_steps , device=_a ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCamelCase__ = scheduler.scale_model_input(_a , _a ) UpperCamelCase__ = model(_a , _a ) UpperCamelCase__ = scheduler.step(_a , _a , _a ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(_a ) ) UpperCamelCase__ = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3 def A_ ( self : Optional[int] ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma UpperCamelCase__ = sample.to(_a ) for t in scheduler.timesteps: UpperCamelCase__ = scheduler.scale_model_input(_a , _a ) UpperCamelCase__ = model(_a , _a ) UpperCamelCase__ = scheduler.step(_a , _a , _a ) UpperCamelCase__ = output.prev_sample UpperCamelCase__ = torch.sum(torch.abs(_a ) ) UpperCamelCase__ = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
35
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowercase = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS) lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") lowercase = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def lowerCamelCase_ ( UpperCamelCase__ : str ): '''simple docstring''' UpperCamelCase__ = None # source code of `config_class` UpperCamelCase__ = inspect.getsource(UpperCamelCase__ ) UpperCamelCase__ = _re_checkpoint.findall(UpperCamelCase__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): UpperCamelCase__ = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCamelCase__ = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCamelCase__ = ckpt_name break return checkpoint def lowerCamelCase_ ( ): '''simple docstring''' UpperCamelCase__ = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCamelCase__ = get_checkpoint_from_config_class(UpperCamelCase__ ) UpperCamelCase__ = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: UpperCamelCase__ = '''\n'''.join(sorted(UpperCamelCase__ ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
35
1
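The docstring checker above hinges on one regex. A standalone sketch of the extraction step (the function name is illustrative):

import re

# matches markdown links such as [bert-base-uncased](https://huggingface.co/bert-base-uncased)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


def first_valid_checkpoint(docstring: str):
    for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
        # a link is "valid" when it is exactly the hub URL for the checkpoint name
        if ckpt_link.rstrip("/") == f"https://huggingface.co/{ckpt_name}":
            return ckpt_name
    return None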
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCAmelCase__ : Union[str, Any] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCAmelCase__ : Any = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCAmelCase__ : Union[str, Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_000)) def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = len([g for position, g in enumerate(lowerCamelCase ) if g == main_target[position]] ) return (item, float(lowerCamelCase )) def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 ) UpperCAmelCase__ = parent_a[:random_slice] + parent_a[random_slice:] UpperCAmelCase__ = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = list(lowerCamelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: UpperCAmelCase__ = random.choice(lowerCamelCase ) return "".join(lowerCamelCase ) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ): UpperCAmelCase__ = [] # Generate more children proportionally to the fitness score. UpperCAmelCase__ = int(parent_a[1] * 1_0_0 ) + 1 UpperCAmelCase__ = 1_0 if child_n >= 1_0 else child_n for _ in range(lowerCamelCase ): UpperCAmelCase__ = population_score[random.randint(0 , lowerCamelCase )][0] UpperCAmelCase__ , UpperCAmelCase__ = crossover(parent_a[0] , lowerCamelCase ) # Append new string to the population list. pop.append(mutate(lowerCamelCase , lowerCamelCase ) ) pop.append(mutate(lowerCamelCase , lowerCamelCase ) ) return pop def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: UpperCAmelCase__ = f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(lowerCamelCase ) # Verify that the target contains no genes besides the ones inside genes variable. UpperCAmelCase__ = sorted({c for c in target if c not in genes} ) if not_in_genes_list: UpperCAmelCase__ = f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(lowerCamelCase ) # Generate random starting population. UpperCAmelCase__ = [] for _ in range(lowerCamelCase ): population.append(''.join([random.choice(lowerCamelCase ) for i in range(len(lowerCamelCase ) )] ) ) # Just some logs to know what the algorithms is doing. UpperCAmelCase__ , UpperCAmelCase__ = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(lowerCamelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
UpperCAmelCase__ = [evaluate(lowerCamelCase , lowerCamelCase ) for item in population] # Check if there is a matching evolution. UpperCAmelCase__ = sorted(lowerCamelCase , key=lambda lowerCamelCase : x[1] , reverse=lowerCamelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 1_0 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. UpperCAmelCase__ = population[: int(N_POPULATION / 3 )] population.clear() population.extend(lowerCamelCase ) # Normalize population score to be between 0 and 1. UpperCAmelCase__ = [ (item, score / len(lowerCamelCase )) for item, score in population_score ] # This is selection for i in range(lowerCamelCase ): population.extend(select(population_score[int(lowerCamelCase )] , lowerCamelCase , lowerCamelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(lowerCamelCase ) > N_POPULATION: break if __name__ == "__main__": lowerCAmelCase__ : Optional[Any] = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) lowerCAmelCase__ : int = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
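# A minimal smoke test of the evolution loop above (a sketch, not part of the
# original script): the short target is invented so the run converges in a handful
# of generations; exact counts vary with the random seed.
#
#     gen, pop, best = basic("hello world", list(" abcdefghijklmnopqrstuvwxyz"), debug=False)
#     print(gen, pop, best)  # e.g. 32 28000 hello world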
'''simple docstring'''


def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
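# Two quick spot checks of the recursive bubble sort (illustrative only):
#
#     print(bubble_sort([3, 1, 2]))        # [1, 2, 3]
#     print(bubble_sort([0, -5, 2, -1]))   # [-5, -1, 0, 2]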
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
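# A sketch of how this script would be launched, assuming `arguments.py` defines
# `tokenizer_dir`, `dataset_name`, `tokenized_data_repo` and `num_workers` fields
# (the repository names below are placeholders, not real defaults):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo my-user/codeparrot-train-tokenized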
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''

from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
"""simple docstring""" import argparse import json from tqdm import tqdm def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--src_path' , type=__snake_case , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , ) parser.add_argument( '--evaluation_set' , type=__snake_case , help='where to store parsed evaluation_set file' , ) parser.add_argument( '--gold_data_path' , type=__snake_case , help='where to store parsed gold_data_path file' , ) lowercase = parser.parse_args() with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open( args.gold_data_path , 'w' ) as gold_file: lowercase = json.load(__snake_case ) for dpr_record in tqdm(__snake_case ): lowercase = dpr_record['question'] lowercase = [context['title'] for context in dpr_record['positive_ctxs']] eval_file.write(question + '\n' ) gold_file.write('\t'.join(__snake_case ) + '\n' ) if __name__ == "__main__": main()
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# lowerCAmelCase_ = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] lowerCAmelCase_ = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] lowerCAmelCase_ = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks lowerCAmelCase_ = f'''down_blocks.{i}.resnets.{j}.''' lowerCAmelCase_ = f'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 lowerCAmelCase_ = f'''down_blocks.{i}.attentions.{j}.''' lowerCAmelCase_ = f'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks lowerCAmelCase_ = f'''up_blocks.{i}.resnets.{j}.''' lowerCAmelCase_ = f'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 lowerCAmelCase_ = f'''up_blocks.{i}.attentions.{j}.''' lowerCAmelCase_ = f'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 lowerCAmelCase_ = f'''down_blocks.{i}.downsamplers.0.conv.''' lowerCAmelCase_ = f'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 lowerCAmelCase_ = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase_ = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) lowerCAmelCase_ = 'mid_block.attentions.0.' lowerCAmelCase_ = 'middle_block.1.' 
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): lowerCAmelCase_ = f'''mid_block.resnets.{j}.''' lowerCAmelCase_ = f'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: lowercase : Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowercase : Tuple = v.replace(_A , _A ) lowercase : Optional[int] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowercase : Optional[int] = v.replace(_A , _A ) lowercase : Optional[int] = v lowercase : int = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# lowerCAmelCase_ = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): lowerCAmelCase_ = f'''encoder.down_blocks.{i}.resnets.{j}.''' lowerCAmelCase_ = f'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: lowerCAmelCase_ = f'''down_blocks.{i}.downsamplers.0.''' lowerCAmelCase_ = f'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) lowerCAmelCase_ = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase_ = f'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): lowerCAmelCase_ = f'''decoder.up_blocks.{i}.resnets.{j}.''' lowerCAmelCase_ = f'''decoder.up.{3-i}.block.{j}.''' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): lowerCAmelCase_ = f'''mid_block.resnets.{i}.''' lowerCAmelCase_ = f'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) lowerCAmelCase_ = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def snake_case( __magic_name__ ) -> List[str]: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def snake_case( __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : int = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowercase : Optional[Any] = v.replace(_A , _A ) lowercase : Dict = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowercase : Union[str, Any] = v.replace(_A , _A ) lowercase : Any = v lowercase : List[str] = {v: vae_state_dict[k] for k, v in mapping.items()} lowercase : List[Any] = ['''q''', '''k''', '''v''', '''proj_out'''] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F"""mid.attn_1.{weight_name}.weight""" in k: print(F"""Reshaping {k} for SD format""" ) lowercase : List[Any] = reshape_weight_for_sd(_A ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# lowerCAmelCase_ = [ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), 
('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] lowerCAmelCase_ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} lowerCAmelCase_ = re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp lowerCAmelCase_ = {'q': 0, 'k': 1, 'v': 2} def snake_case( __magic_name__ ) -> str: '''simple docstring''' lowercase : str = {} lowercase : Optional[int] = {} lowercase : Optional[int] = {} for k, v in text_enc_dict.items(): if ( k.endswith('''.self_attn.q_proj.weight''' ) or k.endswith('''.self_attn.k_proj.weight''' ) or k.endswith('''.self_attn.v_proj.weight''' ) ): lowercase : Union[str, Any] = k[: -len('''.q_proj.weight''' )] lowercase : Any = k[-len('''q_proj.weight''' )] if k_pre not in capture_qkv_weight: lowercase : Optional[int] = [None, None, None] lowercase : Tuple = v continue if ( k.endswith('''.self_attn.q_proj.bias''' ) or k.endswith('''.self_attn.k_proj.bias''' ) or k.endswith('''.self_attn.v_proj.bias''' ) ): lowercase : str = k[: -len('''.q_proj.bias''' )] lowercase : Optional[Any] = k[-len('''q_proj.bias''' )] if k_pre not in capture_qkv_bias: lowercase : List[str] = [None, None, None] lowercase : Union[str, Any] = v continue lowercase : Any = textenc_pattern.sub(lambda __magic_name__ : protected[re.escape(m.group(0 ) )] , _A ) lowercase : Optional[Any] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) lowercase : Any = textenc_pattern.sub(lambda __magic_name__ : protected[re.escape(m.group(0 ) )] , _A ) lowercase : Optional[int] = torch.cat(_A ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) lowercase : int = textenc_pattern.sub(lambda __magic_name__ : protected[re.escape(m.group(0 ) )] , _A ) lowercase : Any = torch.cat(_A ) return new_state_dict def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) lowerCAmelCase_ = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors lowerCAmelCase_ = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') lowerCAmelCase_ = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') lowerCAmelCase_ = osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): lowerCAmelCase_ = load_file(unet_path, device='cpu') else: lowerCAmelCase_ = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') lowerCAmelCase_ = torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): lowerCAmelCase_ = load_file(vae_path, device='cpu') else: lowerCAmelCase_ = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') lowerCAmelCase_ = torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): lowerCAmelCase_ = load_file(text_enc_path, device='cpu') else: lowerCAmelCase_ = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') lowerCAmelCase_ = torch.load(text_enc_path, map_location='cpu') # Convert the UNet model lowerCAmelCase_ = convert_unet_state_dict(unet_state_dict) lowerCAmelCase_ = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model lowerCAmelCase_ = convert_vae_state_dict(vae_state_dict) lowerCAmelCase_ = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper lowerCAmelCase_ = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm lowerCAmelCase_ = {'transformer.' + k: v for k, v in text_enc_dict.items()} lowerCAmelCase_ = convert_text_enc_state_dict_vaa(text_enc_dict) lowerCAmelCase_ = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: lowerCAmelCase_ = convert_text_enc_state_dict(text_enc_dict) lowerCAmelCase_ = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint lowerCAmelCase_ = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: lowerCAmelCase_ = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: lowerCAmelCase_ = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
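# A sketch of invoking the converter above (the script filename and paths are
# placeholders; the flags are the ones defined by the argparse block):
#
#     python convert_diffusers_to_sd.py \
#         --model_path ./my-diffusers-model \
#         --checkpoint_path ./model.safetensors \
#         --half --use_safetensors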
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
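# Worked checks of check_partition_perfect: sqrt(4n + 1) / 2 + 1 / 2 must be an
# exact power of two, so n = 2 (sqrt(9)/2 + 1/2 = 2 = 2**1) and n = 12
# (sqrt(49)/2 + 1/2 = 4 = 2**2) pass, while n = 3 does not:
#
#     print(check_partition_perfect(2))   # True
#     print(check_partition_perfect(12))  # True
#     print(check_partition_perfect(3))   # False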
from __future__ import annotations from typing import Any class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0) -> None: _A , _A : Any = row, column _A : str = [[default_value for c in range(__lowerCamelCase)] for r in range(__lowerCamelCase)] def __str__( self) -> str: _A : Any = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier _A : List[str] = 0 for row_vector in self.array: for obj in row_vector: _A : Any = max(__lowerCamelCase , len(str(__lowerCamelCase))) _A : Tuple = F"%{max_element_length}s" # Make string and return def single_line(__lowerCamelCase) -> str: nonlocal string_format_identifier _A : Tuple = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector) line += "]" return line s += "\n".join(single_line(__lowerCamelCase) for row_vector in self.array) return s def __repr__( self) -> str: return str(self) def _lowerCamelCase ( self , __lowerCamelCase) -> bool: if not (isinstance(__lowerCamelCase , (list, tuple)) and len(__lowerCamelCase) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , __lowerCamelCase) -> Any: assert self.validate_indicies(__lowerCamelCase) return self.array[loc[0]][loc[1]] def __setitem__( self , __lowerCamelCase , __lowerCamelCase) -> None: assert self.validate_indicies(__lowerCamelCase) _A : Optional[int] = value def __add__( self , __lowerCamelCase) -> Matrix: assert isinstance(__lowerCamelCase , __lowerCamelCase) assert self.row == another.row and self.column == another.column # Add _A : Optional[int] = Matrix(self.row , self.column) for r in range(self.row): for c in range(self.column): _A : str = self[r, c] + another[r, c] return result def __neg__( self) -> Matrix: _A : Any = Matrix(self.row , self.column) for r in range(self.row): for c in range(self.column): _A : Dict = -self[r, c] return result def __sub__( self , __lowerCamelCase) -> Matrix: return self + (-another) def __mul__( self , __lowerCamelCase) -> Matrix: if isinstance(__lowerCamelCase , (int, float)): # Scalar multiplication _A : Optional[Any] = Matrix(self.row , self.column) for r in range(self.row): for c in range(self.column): _A : Dict = self[r, c] * another return result elif isinstance(__lowerCamelCase , __lowerCamelCase): # Matrix multiplication assert self.column == another.row _A : str = Matrix(self.row , another.column) for r in range(self.row): for c in range(another.column): for i in range(self.column): result[r, c] += self[r, i] * another[i, c] return result else: _A : List[str] = F"Unsupported type given for another ({type(__lowerCamelCase)})" raise TypeError(__lowerCamelCase) def _lowerCamelCase ( self) -> Matrix: _A : Any = Matrix(self.column , self.row) for r in range(self.row): for c in range(self.column): _A : Optional[int] = self[r, c] return result def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Any: assert isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(__lowerCamelCase , __lowerCamelCase) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate _A : Any = v.transpose() _A : Optional[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _UpperCAmelCase (): # 
a^(-1) _A : int = Matrix(3 , 3 , 0 ) for i in range(3 ): _A : Tuple = 1 print(f"a^(-1) is {ainv}" ) # u, v _A : List[Any] = Matrix(3 , 1 , 0 ) _A , _A , _A : Optional[Any] = 1, 2, -3 _A : Tuple = Matrix(3 , 1 , 0 ) _A , _A , _A : Optional[int] = 4, -2, 5 print(f"u is {u}" ) print(f"v is {v}" ) print(f"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCamelCase__ , UpperCamelCase__ )}" ) def _UpperCAmelCase (): import doctest doctest.testmod() testa()
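# For reference, the identity behind the sherman_morrison method above: for an
# invertible A and column vectors u, v with 1 + v^T A^(-1) u != 0,
#
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
#
# Note that the method is called on the matrix playing the role of A^(-1): `self`
# is the existing inverse, so `ainv.sherman_morrison(u, v)` returns (A + u v^T)^(-1)
# without performing a fresh matrix inversion.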
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1E-12 ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCAmelCase , axis=1 ) , a_min=__lowerCAmelCase ) ).T SCREAMING_SNAKE_CASE__ : str = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCAmelCase , axis=1 ) , a_min=__lowerCAmelCase ) ).T return jnp.matmul(__lowerCAmelCase , norm_emb_a.T ) class __a (nn.Module): '''simple docstring''' _SCREAMING_SNAKE_CASE :CLIPConfig _SCREAMING_SNAKE_CASE :jnp.dtype = jnp.floataa def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config ) SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Dense(self.config.projection_dim , use_bias=_a , dtype=self.dtype ) SCREAMING_SNAKE_CASE__ : Tuple = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) SCREAMING_SNAKE_CASE__ : Any = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) ) def __call__( self , _a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.vision_model(_a )[1] SCREAMING_SNAKE_CASE__ : str = self.visual_projection(_a ) SCREAMING_SNAKE_CASE__ : List[str] = jax_cosine_distance(_a , self.special_care_embeds ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jax_cosine_distance(_a , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE__ : int = 0.0 SCREAMING_SNAKE_CASE__ : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE__ : Dict = jnp.round(_a , 3 ) SCREAMING_SNAKE_CASE__ : Dict = jnp.any(special_scores > 0 , axis=1 , keepdims=_a ) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE__ : Any = is_special_care * 0.01 SCREAMING_SNAKE_CASE__ : List[Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.round(_a , 3 ) SCREAMING_SNAKE_CASE__ : List[str] = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Dict = CLIPConfig _SCREAMING_SNAKE_CASE :Union[str, Any] = """clip_input""" _SCREAMING_SNAKE_CASE :Dict = FlaxStableDiffusionSafetyCheckerModule def __init__( self , _a , _a = None , _a = 0 , _a = jnp.floataa , _a = True , **_a , ) -> Optional[int]: """simple docstring""" if input_shape is None: SCREAMING_SNAKE_CASE__ : List[Any] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE__ : Any = self.module_class(config=_a , dtype=_a , **_a ) super().__init__(_a , _a , input_shape=_a , seed=_a , dtype=_a , _do_init=_do_init ) def _a ( self , _a , _a , _a = None ) -> FrozenDict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = jax.random.normal(_a , _a ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : 
Tuple = jax.random.split(_a ) SCREAMING_SNAKE_CASE__ : List[str] = {"""params""": params_rng, """dropout""": dropout_rng} SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.module.init(_a , _a )["""params"""] return random_params def __call__( self , _a , _a = None , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = jnp.transpose(_a , (0, 2, 3, 1) ) return self.module.apply( {"""params""": params or self.params} , jnp.array(_a , dtype=jnp.floataa ) , rngs={} , )
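# A tiny numeric check of the cosine-distance helper defined at the top of this
# module (named `_lowercase` in this dump but referenced as `jax_cosine_distance`
# in the safety-checker body): rows are L2-normalised, so the result is a plain
# dot-product similarity matrix.
#
#     import jax.numpy as jnp
#     emb_1 = jnp.array([[1.0, 0.0]])
#     emb_2 = jnp.array([[0.0, 1.0], [1.0, 0.0]])
#     print(jax_cosine_distance(emb_1, emb_2))  # [[0., 1.]] -- orthogonal vs. identical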
def decimal_isolate(number: float, digit_amount: int) -> float:
    # Isolate the decimal part of a number; if digit_amount > 0, round to that
    # many decimal places, otherwise return the entire decimal part.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = "▁" UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class lowercase_ ( lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = BertGenerationTokenizer __snake_case = False __snake_case = True def __lowerCAmelCase ( self : str ) ->str: """simple docstring""" super().setUp() a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : int ) ->Dict: """simple docstring""" a = '''<s>''' a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(__UpperCAmelCase ) , 1_002 ) def __lowerCAmelCase ( self : List[str] ) ->List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def __lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) a = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , ) a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def __lowerCAmelCase ( self : Any ) ->str: """simple docstring""" a = '''Hello World!''' a = [18_536, 2_260, 101] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def __lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" a = ( '''This is a very long 
text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) a = [ 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, ] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @require_torch @slow def __lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence a = list(self.big_tokenizer.get_vocab().keys() )[:10] a = ''' '''.join(__UpperCAmelCase ) a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase ) a = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase ) a = BertGenerationConfig() a = BertGenerationEncoder(__UpperCAmelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCAmelCase ) model(**__UpperCAmelCase ) @slow def __lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
"""simple docstring""" class UpperCamelCase : # Public class to implement a graph def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> None: snake_case_ = row snake_case_ = col snake_case_ = graph def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> None: # Checking all 8 elements surrounding nth element snake_case_ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order snake_case_ = [-1, 0, 1, -1, 1, -1, 0, 1] snake_case_ = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k], j + col_nbr[k], __UpperCAmelCase): self.diffs(i + row_nbr[k], j + col_nbr[k], __UpperCAmelCase) def a_ ( self) -> int: # And finally, count all islands. snake_case_ = [[False for j in range(self.COL)] for i in range(self.ROW)] snake_case_ = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) count += 1 return count
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
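# A minimal sketch of using the configuration above, assuming it is exposed as
# `GPTJConfig` as in the transformers library (small values, for illustration only):
#
#     from transformers import GPTJConfig
#     config = GPTJConfig(n_layer=4, n_head=4, n_embd=128)
#     print(config.hidden_size)  # 128 -- attribute_map aliases hidden_size to n_embd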
'''simple docstring'''

from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
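# Both helpers agree with NumPy's built-in norm of the difference vector:
#
#     print(euclidean_distance([1, 2, 3], [4, 5, 6]))                       # 5.196152...
#     print(np.linalg.norm(np.asarray([1, 2, 3]) - np.asarray([4, 5, 6])))  # same value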
'''simple docstring'''

from __future__ import annotations

import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
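# The identity the first test exercises: for the symmetric block matrix
# X = [[A, B], [B^T, C]] with invertible A,
#
#     det(X) = det(A) * det(C - B^T A^(-1) B)
#
# where S = C - B^T A^(-1) B is exactly the Schur complement returned above.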
import sys from collections import defaultdict class lowerCamelCase__ : '''simple docstring''' def __init__(self ) -> int: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = [] def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Dict: """simple docstring""" return self.node_position[vertex] def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = pos def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: lowerCAmelCase__ : Optional[int] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: lowerCAmelCase__ : Tuple = 2 * start + 1 else: lowerCAmelCase__ : Union[str, Any] = 2 * start + 2 if heap[smallest_child] < heap[start]: lowerCAmelCase__ , lowerCAmelCase__ : Dict = heap[smallest_child], positions[smallest_child] lowerCAmelCase__ , lowerCAmelCase__ : str = ( heap[start], positions[start], ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = temp, tempa lowerCAmelCase__ : Any = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,__lowerCamelCase ) self.top_to_bottom(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : str = position[index] while index != 0: lowerCAmelCase__ : Optional[Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: lowerCAmelCase__ : Optional[Any] = heap[parent] lowerCAmelCase__ : Optional[Any] = position[parent] self.set_position(position[parent] ,__lowerCamelCase ) else: lowerCAmelCase__ : str = val lowerCAmelCase__ : Optional[int] = temp self.set_position(__lowerCamelCase ,__lowerCamelCase ) break lowerCAmelCase__ : Any = parent else: lowerCAmelCase__ : List[Any] = val lowerCAmelCase__ : Dict = temp self.set_position(__lowerCamelCase ,0 ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : int = len(__lowerCamelCase ) // 2 - 1 for i in range(__lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(__lowerCamelCase ,__lowerCamelCase ,len(__lowerCamelCase ) ,__lowerCamelCase ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[str] = positions[0] lowerCAmelCase__ : str = sys.maxsize self.top_to_bottom(__lowerCamelCase ,0 ,len(__lowerCamelCase ) ,__lowerCamelCase ) return temp def lowerCAmelCase__ ( lowerCamelCase_ : Dict): '''simple docstring''' lowerCAmelCase__ : Tuple = Heap() lowerCAmelCase__ : List[str] = [0] * len(lowerCamelCase_) lowerCAmelCase__ : str = [-1] * len(lowerCamelCase_) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph lowerCAmelCase__ : Optional[Any] = [] # Heap of Distance of vertices from their neighboring vertex lowerCAmelCase__ : List[str] = [] for vertex in range(len(lowerCamelCase_)): distance_tv.append(sys.maxsize) positions.append(lowerCamelCase_) heap.node_position.append(lowerCamelCase_) lowerCAmelCase__ : int = [] lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Any = sys.maxsize for neighbor, distance in adjacency_list[0]: lowerCAmelCase__ : List[Any] = 0 
lowerCAmelCase__ : int = distance heap.heapify(lowerCamelCase_ ,lowerCamelCase_) for _ in range(1 ,len(lowerCamelCase_)): lowerCAmelCase__ : Union[str, Any] = heap.delete_minimum(lowerCamelCase_ ,lowerCamelCase_) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex)) lowerCAmelCase__ : int = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(lowerCamelCase_)] ): lowerCAmelCase__ : List[Any] = distance heap.bottom_to_top( lowerCamelCase_ ,heap.get_position(lowerCamelCase_) ,lowerCamelCase_ ,lowerCamelCase_) lowerCAmelCase__ : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __snake_case : List[Any] =int(input('Enter number of edges: ').strip()) __snake_case : str =defaultdict(list) for _ in range(edges_number): __snake_case : List[Any] =[int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
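# A small worked input for the Prim driver above (a sketch; the graph is invented
# and assumes the module-level function is exposed as `prisms_algorithm`, as the
# __main__ block suggests):
#
#     from collections import defaultdict
#     adjacency_list = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
#         adjacency_list[u].append([v, w])
#         adjacency_list[v].append([u, w])
#     print(prisms_algorithm(adjacency_list))  # e.g. [(0, 1), (1, 2), (2, 3)], total weight 6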
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): '''simple docstring''' __snake_case = StableDiffusionPanoramaPipeline __snake_case = TEXT_TO_IMAGE_PARAMS __snake_case = TEXT_TO_IMAGE_BATCH_PARAMS __snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS __snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self: List[str] ): torch.manual_seed(0 ) __lowerCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) __lowerCamelCase : Union[str, Any] = DDIMScheduler() torch.manual_seed(0 ) __lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCamelCase : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __lowerCamelCase : Optional[int] = CLIPTextModel(a ) __lowerCamelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowerCamelCase : int = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _snake_case ( self: Any , a: List[str] , a: Dict=0 ): __lowerCamelCase : List[Any] = torch.manual_seed(a ) __lowerCamelCase : Optional[int] = { 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 
'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _snake_case ( self: Tuple ): __lowerCamelCase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : List[Any] = self.get_dummy_components() __lowerCamelCase : Optional[int] = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase : Dict = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase : int = self.get_dummy_inputs(a ) __lowerCamelCase : Dict = sd_pipe(**a ).images __lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : List[str] = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self: List[Any] ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self: List[Any] ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def _snake_case ( self: Tuple ): __lowerCamelCase : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[int] = self.get_dummy_components() __lowerCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase : List[str] = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase : Dict = self.get_dummy_inputs(a ) __lowerCamelCase : Dict = 'french fries' __lowerCamelCase : Dict = sd_pipe(**a , negative_prompt=a ) __lowerCamelCase : Tuple = output.images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : int = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self: Optional[Any] ): __lowerCamelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Union[str, Any] = self.get_dummy_components() __lowerCamelCase : int = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase : Optional[Any] = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase : str = self.get_dummy_inputs(a ) __lowerCamelCase : Optional[Any] = sd_pipe(**a , view_batch_size=2 ) __lowerCamelCase : int = output.images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : Union[str, Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self: Union[str, Any] ): __lowerCamelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Any = self.get_dummy_components() __lowerCamelCase : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' ) __lowerCamelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase : Union[str, Any] = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a ) __lowerCamelCase : Dict = sd_pipe(**a ).images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : Union[str, Any] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 
0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self: Tuple ): __lowerCamelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[Any] = self.get_dummy_components() __lowerCamelCase : Any = PNDMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , skip_prk_steps=a ) __lowerCamelCase : Any = StableDiffusionPanoramaPipeline(**a ) __lowerCamelCase : Union[str, Any] = sd_pipe.to(a ) sd_pipe.set_progress_bar_config(disable=a ) __lowerCamelCase : int = self.get_dummy_inputs(a ) __lowerCamelCase : Optional[int] = sd_pipe(**a ).images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : List[Any] = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class A_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self: Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self: Union[str, Any] , a: int=0 ): __lowerCamelCase : int = torch.manual_seed(a ) __lowerCamelCase : int = { 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _snake_case ( self: Any ): __lowerCamelCase : Tuple = 'stabilityai/stable-diffusion-2-base' __lowerCamelCase : Tuple = DDIMScheduler.from_pretrained(a , subfolder='scheduler' ) __lowerCamelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase : Dict = self.get_inputs() __lowerCamelCase : Optional[int] = pipe(**a ).images __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __lowerCamelCase : str = np.array( [ 0.3_6_9_6_8_3_9_2, 0.2_7_0_2_5_3_7_2, 0.3_2_4_4_6_7_6_6, 0.2_8_3_7_9_3_8_7, 0.3_6_3_6_3_2_7_4, 0.3_0_7_3_3_3_4_7, 0.2_7_1_0_0_0_2_7, 0.2_7_0_5_4_1_2_5, 0.2_5_5_3_6_0_9_6, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _snake_case ( self: Tuple ): __lowerCamelCase : int = StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' , safety_checker=a ) __lowerCamelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase : Any = self.get_inputs() __lowerCamelCase : Optional[int] = pipe(**a ).images __lowerCamelCase : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __lowerCamelCase : List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _snake_case ( self: Union[str, Any] ): __lowerCamelCase : List[Any] = 0 def callback_fn(a: int , a: int , a: torch.FloatTensor ) -> None: __lowerCamelCase : Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __lowerCamelCase : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __lowerCamelCase : Optional[Any] = latents[0, -3:, -3:, -1] __lowerCamelCase : List[str] = np.array( [ 0.1_8_6_8_1_8_6_9, 0.3_3_9_0_7_8_1_6, 0.5_3_6_1_2_7_6, 0.1_4_4_3_2_8_6_5, 
-0.0_2_8_5_6_6_1_1, -0.7_3_9_4_1_1_2_3, 0.2_3_3_9_7_9_8_7, 0.4_7_3_2_2_6_8_2, -0.3_7_8_2_3_1_6_4, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: __lowerCamelCase : List[str] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __lowerCamelCase : Optional[Any] = latents[0, -3:, -3:, -1] __lowerCamelCase : str = np.array( [ 0.1_8_5_3_9_6_4_5, 0.3_3_9_8_7_2_4_8, 0.5_3_7_8_5_5_9, 0.1_4_4_3_7_1_4_2, -0.0_2_4_5_5_2_6_1, -0.7_3_3_8_3_1_7, 0.2_3_9_9_0_7_5_5, 0.4_7_3_5_6_2_7_2, -0.3_7_8_6_5_0_5, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[str] = 'stabilityai/stable-diffusion-2-base' __lowerCamelCase : str = DDIMScheduler.from_pretrained(a , subfolder='scheduler' ) __lowerCamelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) __lowerCamelCase : Any = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing() __lowerCamelCase : List[str] = self.get_inputs() pipe(**a , callback=a , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self: List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase : List[str] = 'stabilityai/stable-diffusion-2-base' __lowerCamelCase : Optional[int] = DDIMScheduler.from_pretrained(a , subfolder='scheduler' ) __lowerCamelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(a , scheduler=a , safety_checker=a ) __lowerCamelCase : Union[str, Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase : str = self.get_inputs() __lowerCamelCase : Any = pipe(**a ) __lowerCamelCase : int = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
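# NOTE: a minimal end-to-end sketch of the pipeline the slow tests above
# exercise. The model id, scheduler subfolder, prompt, and call arguments are
# all taken from those tests; running this needs a GPU and the pretrained
# weights, and the (1, 512, 2048, 3) output shape matches the assertions above.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, safety_checker=None
).to("cuda")
pipe.enable_attention_slicing()

# The panorama pipeline denoises overlapping views, so the default output
# (512x2048) is much wider than a standard Stable Diffusion image.
image = pipe(
    "a photo of the dolomites",
    generator=torch.manual_seed(0),
    num_inference_steps=3,
    guidance_scale=7.5,
    output_type="numpy",
).images
assert image.shape == (1, 512, 2048, 3)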
194
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of current_number is less than needed_sum, then also try
        # skipping it and moving on to the next base number.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
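# NOTE: a small worked example for the solver above: 13 can be written as a
# sum of squares of distinct natural numbers in exactly one way
# (2**2 + 3**2 = 4 + 9), so solve(13, 2) should count a single solution.
assert solve(13, 2) == 1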
194
1
"""simple docstring""" import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Dict=0 ) -> Any: """simple docstring""" if name is None: snake_case : List[Any] = None else: snake_case : int = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' snake_case : Tuple = fmt.format(_snake_case ) # Print and recurse (if needed). if isinstance(_snake_case , _snake_case ): if msg is not None: print(_snake_case ) for k in val.keys(): recursive_print(_snake_case , val[k] , spaces + 2 ) elif isinstance(_snake_case , torch.Tensor ): print(_snake_case , ":" , val.size() ) else: print(_snake_case , ":" , _snake_case ) def __lowerCAmelCase ( lowercase : Dict , lowercase : Any , lowercase : int , lowercase : Union[str, Any] , lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case : str = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] snake_case : Any = (num_heads, hidden_size, num_splits) + input_shape[1:] snake_case : List[str] = param.view(*_snake_case ) snake_case : List[Any] = param.transpose(0 , 2 ) snake_case : str = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] snake_case : int = (num_heads, num_splits, hidden_size) + input_shape[1:] snake_case : List[str] = param.view(*_snake_case ) snake_case : Any = param.transpose(0 , 1 ).contiguous() snake_case : str = param.view(*_snake_case ) return param def __lowerCAmelCase ( lowercase : int , lowercase : List[str] , lowercase : Tuple ) -> Union[str, Any]: """simple docstring""" snake_case : List[Any] = {} # old versions did not store training args snake_case : Any = input_state_dict.get("args" , _snake_case ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) snake_case : int = ds_args.padded_vocab_size snake_case : Optional[int] = ds_args.max_position_embeddings snake_case : Dict = ds_args.hidden_size snake_case : Tuple = ds_args.num_layers snake_case : Tuple = ds_args.num_attention_heads snake_case : str = ds_args.ffn_hidden_size # pprint(config) # The number of heads. snake_case : Dict = config.n_head # The hidden_size per head. snake_case : Tuple = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): snake_case : Any = input_state_dict['''checkpoint_version'''] else: snake_case : Union[str, Any] = 0.0 # The model. snake_case : List[str] = input_state_dict['''model'''] # The language model. snake_case : Optional[Any] = model['''language_model'''] # The embeddings. snake_case : Union[str, Any] = lm['''embedding'''] # The word embeddings. snake_case : Any = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. snake_case : List[Any] = word_embeddings[: config.vocab_size, :] snake_case : List[Any] = word_embeddings # The position embeddings. snake_case : Tuple = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). 
[max_sequence_length, hidden_size] snake_case : Optional[Any] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. snake_case : str = pos_embeddings # The transformer. snake_case : Dict = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. snake_case : Dict = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" ) # The simple map of names for "automated" rules. snake_case : int = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. snake_case : int = layer_re.match(_snake_case ) # Stop if that's not a layer if m is None: break # The index of the layer. snake_case : Dict = int(m.group(1 ) ) # The name of the operation. snake_case : Optional[int] = m.group(2 ) # Is it a weight or a bias? snake_case : int = m.group(3 ) # The name of the layer. snake_case : Dict = F'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm" ): snake_case : Dict = '''ln_1''' if op_name.startswith("input" ) else '''ln_2''' snake_case : Optional[int] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. snake_case : List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _snake_case , _snake_case ) snake_case : List[str] = causal_mask # Insert a "dummy" tensor for masked_bias. snake_case : Optional[Any] = torch.tensor(-1e4 , dtype=torch.floataa ) snake_case : Optional[Any] = masked_bias snake_case : Dict = fix_query_key_value_ordering(_snake_case , _snake_case , 3 , _snake_case , _snake_case ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. snake_case : Any = out_val.transpose(0 , 1 ).contiguous() # Store. snake_case : Dict = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": snake_case : Optional[Any] = fix_query_key_value_ordering(_snake_case , _snake_case , 3 , _snake_case , _snake_case ) # Store. No change of shape. snake_case : Union[str, Any] = out_val # Transpose the weights. elif weight_or_bias == "weight": snake_case : List[str] = megatron_to_transformers[op_name] snake_case : int = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": snake_case : int = megatron_to_transformers[op_name] snake_case : List[Any] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. snake_case : Optional[Any] = transformer['''final_layernorm.weight'''] snake_case : Tuple = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. snake_case : str = word_embeddings # It should be done! 
return output_state_dict def __lowerCAmelCase ( ) -> Tuple: """simple docstring""" snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure" , action="store_true" ) parser.add_argument( "path_to_checkpoint" , type=_snake_case , help="Path to the checkpoint file (.zip archive or direct .pt file)" , ) parser.add_argument( "--config_file" , default="" , type=_snake_case , help="An optional config json file describing the pre-trained model." , ) snake_case : Optional[int] = parser.parse_args() # Extract the basename. snake_case : Optional[int] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith(".zip" ): with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict: snake_case : List[Any] = torch.load(_snake_case , map_location="cpu" ) else: snake_case : int = torch.load(args.path_to_checkpoint , map_location="cpu" ) snake_case : List[str] = input_state_dict.get("args" , _snake_case ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: snake_case : Any = '''gelu_fast''' elif ds_args.openai_gelu: snake_case : Dict = '''gelu_new''' else: snake_case : Optional[int] = '''gelu''' else: # in the very early days this used to be "gelu_new" snake_case : str = '''gelu_new''' # Spell out all parameters in case the defaults change. snake_case : Optional[int] = GPTaConfig( vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_snake_case , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_snake_case , summary_activation=_snake_case , summary_proj_to_labels=_snake_case , summary_first_dropout=0.1 , scale_attn_weights=_snake_case , use_cache=_snake_case , bos_token_id=5_0256 , eos_token_id=5_0256 , ) else: snake_case : Any = GPTaConfig.from_json_file(args.config_file ) snake_case : Optional[Any] = ['''GPT2LMHeadModel'''] # Convert. print("Converting" ) snake_case : int = convert_megatron_checkpoint(_snake_case , _snake_case , _snake_case ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_snake_case , _snake_case ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: snake_case : Optional[int] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": snake_case : Optional[int] = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": snake_case : Tuple = ds_args.tokenizer_name_or_path else: raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' ) else: snake_case : Tuple = '''gpt2''' snake_case : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case ) snake_case : Dict = type(_snake_case ).__name__ snake_case : Dict = tokenizer_class # Store the config to file. print("Saving config" ) config.save_pretrained(_snake_case ) # Save tokenizer based on args print(F'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(_snake_case ) # Store the state_dict to file. 
snake_case : Optional[Any] = os.path.join(_snake_case , "pytorch_model.bin" ) print(F'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(_snake_case , _snake_case ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
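# NOTE: a minimal sketch (with made-up dimensions) of the checkpoint_version
# >= 2.0 branch of the QKV reordering above. Megatron stores the fused
# query/key/value weight as [num_heads * num_splits * head_dim, hidden],
# while GPT-2 expects the three splits to be the slowest-varying axis.
import torch

num_heads, num_splits, head_dim, hidden = 2, 3, 4, 8
param = torch.arange(num_heads * num_splits * head_dim * hidden, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, hidden)

out = (
    param.view(num_heads, num_splits, head_dim, hidden)  # split the fused axis
    .transpose(0, 1)                                     # move num_splits to the front
    .contiguous()
    .view(num_heads * num_splits * head_dim, hidden)     # re-fuse in the new order
)
assert out.shape == param.shape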
203
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : int , UpperCamelCase_ : VQModel , UpperCamelCase_ : UNetaDModel , UpperCamelCase_ : DDIMScheduler ): super().__init__() self.register_modules(vqvae=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Optional[int] , ): lowerCAmelCase : Dict = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCAmelCase : List[str] = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(UpperCamelCase_ ) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature lowerCAmelCase : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCAmelCase : List[str] = {} if accepts_eta: lowerCAmelCase : List[Any] = eta for t in self.progress_bar(self.scheduler.timesteps ): lowerCAmelCase : List[str] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) # predict the noise residual lowerCAmelCase : Tuple = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase : Optional[Any] = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample # decode the image latents with the VAE lowerCAmelCase : Dict = self.vqvae.decode(UpperCamelCase_ ).sample lowerCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : List[str] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
60
0
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class __snake_case ( lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = ReformerTokenizer lowerCAmelCase_ = ReformerTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = True def __a ( self : Tuple ): """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = ReformerTokenizer(_lowercase , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """<s>""" SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(_lowercase ) , 10_00 ) def __a ( self : Dict ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __a ( self : List[str] ): """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = """I was born in 92000, and this is falsé.""" SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) def __a ( self : Union[str, Any] , _lowercase : Optional[Any]=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) # Simple input SCREAMING_SNAKE_CASE__ = """This is a simple input""" SCREAMING_SNAKE_CASE__ = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding="""max_length""" ) # Simple input self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding="""max_length""" ) # Simple input self.assertRaises( _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding="""max_length""" , ) # 
Pair input self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding="""max_length""" ) # Pair input self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding="""max_length""" ) # Pair input self.assertRaises( _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding="""max_length""" , ) def __a ( self : Dict ): """simple docstring""" pass def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ReformerTokenizer(_lowercase , keep_accents=_lowercase ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [2_85, 46, 10, 1_70, 3_82] , ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowercase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def __a ( self : Union[str, Any] ): """simple docstring""" return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" ) @slow def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """Hello World!""" SCREAMING_SNAKE_CASE__ = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) ) @slow def __a ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) SCREAMING_SNAKE_CASE__ = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) ) @require_torch @slow def __a ( self : Optional[int] ): """simple docstring""" import torch from transformers import ReformerConfig, ReformerModel # Build sequence SCREAMING_SNAKE_CASE__ = list(self.big_tokenizer.get_vocab().keys() )[:10] SCREAMING_SNAKE_CASE__ = """ """.join(_lowercase ) SCREAMING_SNAKE_CASE__ = self.big_tokenizer.encode_plus(_lowercase , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) SCREAMING_SNAKE_CASE__ = encoded_sequence["""input_ids"""].shape SCREAMING_SNAKE_CASE__ = ReformerModel(_lowercase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_lowercase ) model(**_lowercase ) @slow def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 SCREAMING_SNAKE_CASE__ = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=_lowercase , sequences=_lowercase , )
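# NOTE: a quick sanity check against the integration expectations above,
# using the same public checkpoint the slow tests load.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]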
366
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger(__name__) __lowerCamelCase : Tuple = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: SCREAMING_SNAKE_CASE__ = TOKENIZER_CLASSES else: SCREAMING_SNAKE_CASE__ = {tokenizer_name: getattr(__UpperCamelCase , tokenizer_name + """Fast""" )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: SCREAMING_SNAKE_CASE__ = TOKENIZER_CLASSES[tokenizer_name] SCREAMING_SNAKE_CASE__ = True if checkpoint_name is None: SCREAMING_SNAKE_CASE__ = list(tokenizer_class.max_model_input_sizes.keys() ) else: SCREAMING_SNAKE_CASE__ = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained(__UpperCamelCase , force_download=__UpperCamelCase ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = checkpoint.split("""/""" ) SCREAMING_SNAKE_CASE__ = os.path.join(__UpperCamelCase , __UpperCamelCase ) elif add_prefix: SCREAMING_SNAKE_CASE__ = checkpoint SCREAMING_SNAKE_CASE__ = dump_path else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: SCREAMING_SNAKE_CASE__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] SCREAMING_SNAKE_CASE__ = file_path.split(__UpperCamelCase )[-1][0] if next_char == "/": SCREAMING_SNAKE_CASE__ = os.path.join(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE__ = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) SCREAMING_SNAKE_CASE__ = tokenizer.save_pretrained( __UpperCamelCase , legacy_format=__UpperCamelCase , filename_prefix=__UpperCamelCase ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith("""tokenizer.json""" ): os.remove(__UpperCamelCase ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": __lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. 
If not given, will """ '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) __lowerCamelCase : Any = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
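# NOTE: the core of the script above, reduced to a single checkpoint: load a
# fast tokenizer and serialize it with legacy_format=False so that only the
# unified tokenizer.json is kept (the script deletes the legacy vocab files
# afterwards). "bert-base-uncased" is just an illustrative checkpoint, not
# one named by this file.
from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
tokenizer.save_pretrained("./dump", legacy_format=False)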
204
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
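# NOTE: the payoff of the lazy-import pattern above is that importing the
# package is cheap and heavy submodules load on first attribute access; e.g.
# (a sketch, assuming transformers is installed):
from transformers import MaskFormerConfig  # resolved lazily on first access

print(MaskFormerConfig.model_type)  # "maskformer"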
35
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =["""image_processor""", """tokenizer"""] snake_case_ ="""Pix2StructImageProcessor""" snake_case_ =("""T5Tokenizer""", """T5TokenizerFast""") def __init__(self ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : str = False super().__init__(__lowerCamelCase ,__lowerCamelCase ) def __call__(self ,__lowerCamelCase=None ,__lowerCamelCase = None ,__lowerCamelCase = True ,__lowerCamelCase = False ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = 20_48 ,__lowerCamelCase = 0 ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = True ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> BatchEncoding: """simple docstring""" if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: lowerCAmelCase__ : List[str] = self.tokenizer lowerCAmelCase__ : List[str] = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values lowerCAmelCase__ : int = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,**__lowerCamelCase ) else: # add pixel_values and bbox lowerCAmelCase__ : List[str] = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,header_text=__lowerCamelCase ,**__lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: lowerCAmelCase__ : List[str] = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) if "attention_mask" in text_encoding: lowerCAmelCase__ : List[str] = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: lowerCAmelCase__ : Dict = text_encoding.pop('''input_ids''' ) else: lowerCAmelCase__ : int = None if text_encoding is not None: encoding_image_processor.update(__lowerCamelCase ) return encoding_image_processor def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Optional[Any]: """simple docstring""" return self.tokenizer.batch_decode(*__lowerCamelCase 
,**__lowerCamelCase ) def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> str: """simple docstring""" return self.tokenizer.decode(*__lowerCamelCase ,**__lowerCamelCase ) @property def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : Dict = self.tokenizer.model_input_names lowerCAmelCase__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
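# NOTE: a minimal usage sketch for the processor above; the real class is
# transformers' Pix2StructProcessor, and "google/pix2struct-textcaps-base"
# is one public checkpoint (an assumption here, not taken from this file).
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open("figure.png")  # hypothetical local image
inputs = processor(images=image, text="A caption", return_tensors="pt", max_patches=2048)
# -> flattened_patches plus decoder inputs when text is given, per the
#    renaming of input_ids/attention_mask in the __call__ above.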
129
0
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    train = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in train:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
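# NOTE: the decisive step in the classifier above is the majority vote over
# the k nearest labels; in isolation it is just collections.Counter:
from collections import Counter

votes = [0, 2, 0, 0, 1]  # hypothetical labels of the 5 nearest neighbours
assert Counter(votes).most_common(1)[0][0] == 0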
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
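# NOTE: outside the test harness, the round trip the tests above verify is
# two calls; this sketch assumes you are logged in (e.g. via
# `huggingface-cli login`) instead of passing use_auth_token explicitly, and
# "your-username" is a placeholder.
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("test-image-processor")
reloaded = ViTImageProcessor.from_pretrained("your-username/test-image-processor")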
4
0
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __snake_case = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=None , snake_case__=True , snake_case__=True , snake_case__=None , ) -> str: '''simple docstring''' UpperCAmelCase : Union[str, Any] =size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase : int =parent UpperCAmelCase : Tuple =batch_size UpperCAmelCase : Union[str, Any] =num_channels UpperCAmelCase : Optional[int] =image_size UpperCAmelCase : Optional[int] =min_resolution UpperCAmelCase : Tuple =max_resolution UpperCAmelCase : Optional[int] =size UpperCAmelCase : Union[str, Any] =do_normalize UpperCAmelCase : Dict =do_convert_rgb UpperCAmelCase : Any =[512, 1024, 2048, 4096] UpperCAmelCase : Optional[int] =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Dict ='''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' UpperCAmelCase : Union[str, Any] =Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Any = PixaStructImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Tuple =PixaStructImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , '''do_normalize''' ) ) self.assertTrue(hasattr(snake_case__ , '''do_convert_rgb''' ) ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Dict =self.image_processor_tester.prepare_dummy_image() UpperCAmelCase : Dict =self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase : List[Any] =2048 UpperCAmelCase : Optional[Any] =image_processor(snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in 
image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase : Optional[Any] =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase : str =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase : Dict =image_processor( snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[Any] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase : Tuple =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase : int =True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase : Union[str, Any] =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches UpperCAmelCase : int ='''Hello''' UpperCAmelCase : List[Any] =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase : Dict =image_processor( snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase : Optional[Any] =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase : Union[str, Any] =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase : List[str] =image_processor( snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : int =self.image_processing_class(**self.image_processor_dict ) # create random 
PyTorch tensors UpperCAmelCase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase : Any =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase : str =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase : Tuple =image_processor( snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = PixaStructImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase : int =3 @property def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , '''do_normalize''' ) ) self.assertTrue(hasattr(snake_case__ , '''do_convert_rgb''' ) ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase : str =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase : Optional[int] =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase : Dict =image_processor( snake_case__ , return_tensors='''pt''' , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
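# NOTE: the expected_hidden_dim computed throughout these tests is just the
# flattened patch size plus two positional columns (the row and column
# indices Pix2Struct prepends to each patch):
patch_h = patch_w = 16
num_channels = 3
expected_hidden_dim = patch_h * patch_w * num_channels + 2
assert expected_hidden_dim == 770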
348
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __snake_case : def __init__( self , snake_case__ , snake_case__=14 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , ) -> str: '''simple docstring''' UpperCAmelCase : str =parent UpperCAmelCase : Tuple =batch_size UpperCAmelCase : Optional[int] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Tuple =use_input_mask UpperCAmelCase : List[Any] =use_token_type_ids UpperCAmelCase : Optional[Any] =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : List[Any] =hidden_size UpperCAmelCase : Optional[int] =rotary_dim UpperCAmelCase : Union[str, Any] =num_hidden_layers UpperCAmelCase : List[Any] =num_attention_heads UpperCAmelCase : Dict =intermediate_size UpperCAmelCase : Union[str, Any] =hidden_act UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : Dict =attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : str =initializer_range UpperCAmelCase : Optional[int] =None UpperCAmelCase : List[Any] =vocab_size - 1 UpperCAmelCase : Optional[Any] =vocab_size - 1 UpperCAmelCase : List[Any] =vocab_size - 1 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] =None if self.use_input_mask: UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Dict =GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] =config_and_inputs UpperCAmelCase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =20 UpperCAmelCase : Any =model_class_name(snake_case__ ) UpperCAmelCase : str =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : Any =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =jnp.broadcast_to( 
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : List[str] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =model( input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , ) UpperCAmelCase : List[Any] =model(snake_case__ ) UpperCAmelCase : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Dict =20 UpperCAmelCase : Dict =model_class_name(snake_case__ ) UpperCAmelCase : Tuple =jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) UpperCAmelCase : Dict =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : int =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : str =model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =model(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCamelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =FlaxGPTJModelTester(self ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) @tooslow def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) UpperCAmelCase : Optional[Any] =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=snake_case__ , truncation=snake_case__ ) UpperCAmelCase : Optional[int] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : str =False UpperCAmelCase : Union[str, Any] 
=model.config.eos_token_id UpperCAmelCase : List[Any] =jax.jit(model.generate ) UpperCAmelCase : Dict =jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences UpperCAmelCase : Any =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) UpperCAmelCase : Tuple =[ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. I\'m going to''', ] self.assertListEqual(snake_case__ , snake_case__ ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : int =0 UpperCAmelCase : Optional[int] =1 UpperCAmelCase : Optional[int] =0 UpperCAmelCase : Union[str, Any] =1 UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval() UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa ) UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ ) UpperCAmelCase : Union[str, Any] =fx_state with torch.no_grad(): UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(snake_case__ ) UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ ) UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : int =getattr(snake_case__ , snake_case__ ) UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval() UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa ) 
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params ) UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : str =0 UpperCAmelCase : Any =1 UpperCAmelCase : List[Any] =0 UpperCAmelCase : Tuple =1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(snake_case__ ) UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ ) with torch.no_grad(): UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : Tuple =model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ )
348
1
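For reference, a minimal sketch of the shape arithmetic the Pix2Struct image-processor tests above assert: each extracted patch is flattened to `height * width * channels` pixel values, and the processor prepends a row and a column index to every patch, hence the `+ 2`. The concrete numbers below are illustrative assumptions, not values taken from the tester.

# Hypothetical patch geometry, chosen only for illustration.
patch_height, patch_width, num_channels = 16, 16, 3

# Flattened pixel values per patch, plus the two prepended position indices.
expected_hidden_dim = patch_height * patch_width * num_channels + 2

print(expected_hidden_dim)  # 770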
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __A = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["ViTFeatureExtractor"] __A = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
348
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 
config_and_inputs __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = True snake_case_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] __lowerCamelCase = [1, 11, 50_265] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. __lowerCamelCase = np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] # compare the actual values for a slice. __lowerCamelCase = np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
348
1
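The ViT `__init__.py` above follows the lazy-import pattern used across the library: exported names are declared in `_import_structure`, and submodules are only imported when a name is first accessed. A stripped-down sketch of the same idea for a package `__init__.py`, using module-level `__getattr__` (PEP 562); the entries shown are stand-ins, not the real import table:

import importlib

# Stand-in import table: submodule -> names it exports.
_import_structure = {
    "configuration_vit": ["ViTConfig"],
    "modeling_vit": ["ViTModel"],
}

_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name):
    # Import the owning submodule only on first access to one of its names.
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")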
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: returns all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    # Counts hybrid-integers p^q * q^p (p, q distinct primes) not exceeding
    # base**degree, comparing logarithms to avoid huge exponentiations.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
156
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
156
1
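A quick usage sketch for the pooling helpers above, on a 4x4 matrix small enough to verify the windows by hand; it assumes `maxpooling` and `avgpooling` as defined in the file above are in scope, and the input values are made up:

import numpy as np

arr = np.array(
    [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16],
    ]
)

# Non-overlapping 2x2 windows -> a 2x2 output.
print(maxpooling(arr, size=2, stride=2))
# [[ 6.  8.]
#  [14. 16.]]

print(avgpooling(arr, size=2, stride=2))  # averages truncated to int
# [[ 3.  5.]
#  [11. 13.]]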
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): lowercase_ = IFInpaintingSuperResolutionPipeline lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} ) lowercase_ = PipelineTesterMixin.required_optional_params - {'latents'} def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=0 ) -> Union[str, Any]: """simple docstring""" if str(lowercase_ ).startswith('''mps''' ): _a = torch.manual_seed(lowercase_ ) else: _a = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) _a = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) _a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) _a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) _a = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" self._test_save_load_local() def __lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
371
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _snake_case : str = logging.get_logger(__name__) _snake_case : Tuple = {'vocab_file': 'spiece.model'} _snake_case : Optional[int] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } _snake_case : Tuple = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ['input_ids', 'attention_mask'] lowercase_ = [] def __init__( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]="<unk>" , lowerCAmelCase_ : Tuple="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : int="<pad>" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> None: """simple docstring""" _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _a = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token _a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , ) _a = vocab_file _a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase_ ) @property def __lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" _a = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Tuple: """simple docstring""" _a = self.__dict__.copy() _a = None return state def __setstate__( self : List[str] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _a = {} _a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[str] ) -> int: """simple docstring""" return self.sp_model.piece_to_id(lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Tuple ) -> str: """simple docstring""" _a = self.sp_model.IdToPiece(lowerCAmelCase_ ) return token def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" _a = [] _a = '''''' _a = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase_ ) + token _a = True _a = [] else: current_sub_tokens.append(lowerCAmelCase_ ) _a = False out_string += self.sp_model.decode(lowerCAmelCase_ ) return out_string.strip() def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Tuple , ) -> str: """simple docstring""" _a = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase_ ) _a = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _a = [] _a = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) ) _a = [] sub_texts.append(lowerCAmelCase_ ) else: current_sub_text.append(lowerCAmelCase_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _a = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(lowerCAmelCase_ ) ) else: _a = ''''''.join(lowerCAmelCase_ ) _a = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _a = self.clean_up_tokenization(lowerCAmelCase_ ) return clean_text else: return text def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _a = os.path.join( lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase_ , '''wb''' ) as fi: _a = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase_ ) return (out_vocab_file,) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a = [self.cls_token_id] _a = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
179
0
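A minimal sketch of the special-token bookkeeping the BigBird tokenizer above implements: a single sequence is wrapped as `[CLS] X [SEP]` (a pair as `[CLS] A [SEP] B [SEP]`), and the special-tokens mask flags exactly the added positions. The IDs here are placeholders, not BigBird's real vocabulary entries:

CLS_ID, SEP_ID = 65, 66  # placeholder IDs chosen for illustration


def build_inputs_with_special_tokens(token_ids, token_ids_pair=None):
    if token_ids_pair is None:
        return [CLS_ID] + token_ids + [SEP_ID]
    return [CLS_ID] + token_ids + [SEP_ID] + token_ids_pair + [SEP_ID]


def get_special_tokens_mask(token_ids, token_ids_pair=None):
    # 1 marks an added special token, 0 an original token.
    if token_ids_pair is None:
        return [1] + [0] * len(token_ids) + [1]
    return [1] + [0] * len(token_ids) + [1] + [0] * len(token_ids_pair) + [1]


print(build_inputs_with_special_tokens([7, 8, 9]))  # [65, 7, 8, 9, 66]
print(get_special_tokens_mask([7, 8, 9]))           # [1, 0, 0, 0, 1]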
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
254
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCAmelCase__ : List[Any] =logging.get_logger(__name__) def __lowercase ( a__ , a__ , a__ , a__ ) -> Tuple[int, int]: def constraint_to_multiple_of(a__ , a__ , a__=0 , a__=None ): __SCREAMING_SNAKE_CASE = round(val / multiple ) * multiple if max_val is not None and x > max_val: __SCREAMING_SNAKE_CASE = math.floor(val / multiple ) * multiple if x < min_val: __SCREAMING_SNAKE_CASE = math.ceil(val / multiple ) * multiple return x __SCREAMING_SNAKE_CASE = (output_size, output_size) if isinstance(a__ , a__ ) else output_size __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_image_size(a__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output_size # determine new height and width __SCREAMING_SNAKE_CASE = output_height / input_height __SCREAMING_SNAKE_CASE = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __SCREAMING_SNAKE_CASE = scale_width else: # fit height __SCREAMING_SNAKE_CASE = scale_height __SCREAMING_SNAKE_CASE = constraint_to_multiple_of(scale_height * input_height , multiple=a__ ) __SCREAMING_SNAKE_CASE = constraint_to_multiple_of(scale_width * input_width , multiple=a__ ) return (new_height, new_width) class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ : List[str] = ['''pixel_values'''] def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = False , _A = 1 , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , **_A , ): '''simple docstring''' super().__init__(**_A ) __SCREAMING_SNAKE_CASE = size if size is not None else {'height': 384, 'width': 384} __SCREAMING_SNAKE_CASE = get_size_dict(_A ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = keep_aspect_ratio __SCREAMING_SNAKE_CASE = ensure_multiple_of __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def _A ( self , _A , _A , _A = False , _A = 1 , _A = PILImageResampling.BICUBIC , _A = None , **_A , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) __SCREAMING_SNAKE_CASE = get_resize_output_image_size( _A , output_size=(size['height'], size['width']) , keep_aspect_ratio=_A , multiple=_A , ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def _A ( self , _A , _A , _A = None , **_A , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def _A ( self , _A , _A , _A , _A = None , **_A , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def _A ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(_A ) __SCREAMING_SNAKE_CASE = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __SCREAMING_SNAKE_CASE = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE = [to_numpy_array(_A ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_A , _A ) for image in images] __SCREAMING_SNAKE_CASE = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A ) def _A ( self , _A , _A = None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_A ) != len(_A ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(_A ): __SCREAMING_SNAKE_CASE = target_sizes.numpy() __SCREAMING_SNAKE_CASE = [] for idx in range(len(_A ) ): __SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_A ) __SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_A ) else: __SCREAMING_SNAKE_CASE = logits.argmax(dim=1 ) __SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
257
0
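The DPT image processor above resizes by scaling each side as little as possible when keeping the aspect ratio, then snapping both sides to a multiple (`ensure_multiple_of`). A standalone sketch that mirrors that logic rather than importing it; the example sizes are arbitrary:

import math


def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # Round to the nearest multiple, staying within [min_val, max_val].
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


def get_resize_output_image_size(input_hw, output_hw, keep_aspect_ratio=True, multiple=32):
    (in_h, in_w), (out_h, out_w) = input_hw, output_hw
    scale_h, scale_w = out_h / in_h, out_w / in_w
    if keep_aspect_ratio:
        # Scale as little as possible: reuse the factor closer to 1 for both sides.
        if abs(1 - scale_w) < abs(1 - scale_h):
            scale_h = scale_w
        else:
            scale_w = scale_h
    return (
        constraint_to_multiple_of(scale_h * in_h, multiple),
        constraint_to_multiple_of(scale_w * in_w, multiple),
    )


print(get_resize_output_image_size((480, 640), (384, 384)))  # (384, 512)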
class PrefixSum:
    # Precomputes running sums so any range sum can be answered in O(1).
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        # Sum of array[start ... end], inclusive.
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        # True if some contiguous subarray sums to target_sum.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
41
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging snake_case : Union[str, Any] = logging.get_logger(__name__) class _snake_case : UpperCamelCase__ = 42 UpperCamelCase__ = None @staticmethod def SCREAMING_SNAKE_CASE ( ): raise NotImplementedError def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ): raise NotImplementedError def SCREAMING_SNAKE_CASE ( self , _a ): raise NotImplementedError def SCREAMING_SNAKE_CASE ( self ): if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def SCREAMING_SNAKE_CASE ( cls ): return f'''`pip install {cls.pip_package or cls.name}`''' class _snake_case ( snake_case ): UpperCamelCase__ = 'optuna' @staticmethod def SCREAMING_SNAKE_CASE ( ): return is_optuna_available() def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ): return run_hp_search_optuna(_a , _a , _a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a ): return default_hp_space_optuna(_a ) class _snake_case ( snake_case ): UpperCamelCase__ = 'ray' UpperCamelCase__ = '\'ray[tune]\'' @staticmethod def SCREAMING_SNAKE_CASE ( ): return is_ray_available() def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ): return run_hp_search_ray(_a , _a , _a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a ): return default_hp_space_ray(_a ) class _snake_case ( snake_case ): UpperCamelCase__ = 'sigopt' @staticmethod def SCREAMING_SNAKE_CASE ( ): return is_sigopt_available() def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ): return run_hp_search_sigopt(_a , _a , _a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a ): return default_hp_space_sigopt(_a ) class _snake_case ( snake_case ): UpperCamelCase__ = 'wandb' @staticmethod def SCREAMING_SNAKE_CASE ( ): return is_wandb_available() def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ): return run_hp_search_wandb(_a , _a , _a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a ): return default_hp_space_wandb(_a ) snake_case : int = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCAmelCase_ ( ) -> str: '''simple docstring''' __magic_name__ : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_snake_case ) > 0: __magic_name__ : Dict = available_backends[0].name if len(_snake_case ) > 1: logger.info( F'''{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
41
1
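A short usage sketch for the prefix-sum class above (using the reconstructed `PrefixSum` name): range sums in O(1) after linear preprocessing, and subarray-sum membership via the running-sum set:

ps = PrefixSum([1, 2, 3, 4])

print(ps.get_sum(0, 3))      # 10 (whole array)
print(ps.get_sum(1, 2))      # 5  (2 + 3)
print(ps.contains_sum(7))    # True  (3 + 4)
print(ps.contains_sum(100))  # False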
"""simple docstring""" import torch from torch import nn class __A ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : str ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : Optional[Any]=1 ,_snake_case : List[str]=False ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase__ : Optional[Any] = n_token lowercase__ : List[str] = d_embed lowercase__ : int = d_proj lowercase__ : Union[str, Any] = cutoffs + [n_token] lowercase__ : Optional[Any] = [0] + self.cutoffs lowercase__ : Optional[Any] = div_val lowercase__ : Dict = self.cutoffs[0] lowercase__ : str = len(self.cutoffs ) - 1 lowercase__ : Optional[Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowercase__ : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) ) lowercase__ : Any = nn.Parameter(torch.zeros(self.n_clusters ) ) lowercase__ : Dict = nn.ModuleList() lowercase__ : Optional[Any] = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) ) else: self.out_projs.append(_snake_case ) self.out_layers.append(nn.Linear(_snake_case ,_snake_case ) ) else: for i in range(len(self.cutoffs ) ): lowercase__ , lowercase__ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[str] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) ) self.out_layers.append(nn.Linear(_snake_case ,r_idx - l_idx ) ) lowercase__ : Union[str, Any] = keep_order def UpperCAmelCase ( self : List[Any] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Optional[Any] ) -> Tuple: """simple docstring""" if proj is None: lowercase__ : List[Any] = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowercase__ : List[str] = nn.functional.linear(_snake_case ,proj.t().contiguous() ) lowercase__ : Tuple = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def UpperCAmelCase ( self : Dict ,_snake_case : List[str] ,_snake_case : Dict=None ,_snake_case : Dict=False ) -> Optional[int]: """simple docstring""" if labels is not None: # Shift so that tokens < n predict n lowercase__ : List[str] = hidden[..., :-1, :].contiguous() lowercase__ : List[str] = labels[..., 1:].contiguous() lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) ) lowercase__ : int = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) ) if self.n_clusters == 0: lowercase__ : int = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] ) if labels is not None: lowercase__ : Dict = labels != -100 lowercase__ : Union[str, Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device ) lowercase__ : List[str] = ( -nn.functional.log_softmax(_snake_case ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowercase__ : str = nn.functional.log_softmax(_snake_case ,dim=-1 ) else: # construct weights and biases lowercase__ , lowercase__ : Dict = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ , lowercase__ 
: Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[str] = self.out_layers[0].weight[l_idx:r_idx] lowercase__ : int = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ : Union[str, Any] = self.out_layers[i].weight lowercase__ : List[str] = self.out_layers[i].bias if i == 0: lowercase__ : int = torch.cat([weight_i, self.cluster_weight] ,dim=0 ) lowercase__ : Tuple = torch.cat([bias_i, self.cluster_bias] ,dim=0 ) weights.append(_snake_case ) biases.append(_snake_case ) lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0] lowercase__ : Optional[int] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Tuple = nn.functional.log_softmax(_snake_case ,dim=1 ) if labels is None: lowercase__ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowercase__ : List[Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device ) lowercase__ : Any = 0 lowercase__ : Optional[int] = [0] + self.cutoffs for i in range(len(_snake_case ) - 1 ): lowercase__ , lowercase__ : Any = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowercase__ : Dict = (labels >= l_idx) & (labels < r_idx) lowercase__ : Optional[int] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowercase__ : Optional[int] = labels.index_select(0 ,_snake_case ) - l_idx lowercase__ : Tuple = head_logprob.index_select(0 ,_snake_case ) lowercase__ : List[Any] = hidden.index_select(0 ,_snake_case ) else: lowercase__ : int = hidden if i == 0: if labels is not None: lowercase__ : str = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 ) else: lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]] else: lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[i], biases[i], self.out_projs[i] lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowercase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 ,target_i[:, None] ).squeeze(1 ) else: lowercase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowercase__ : Optional[Any] = logprob_i if labels is not None: if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 ,_snake_case ,-logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[str] ) -> int: """simple docstring""" if self.n_clusters == 0: lowercase__ : List[Any] = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] ) return nn.functional.log_softmax(_snake_case ,dim=-1 ) else: # construct weights and biases lowercase__ , lowercase__ : Optional[Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ , lowercase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[Any] = self.out_layers[0].weight[l_idx:r_idx] lowercase__ : List[Any] = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ : Optional[int] = self.out_layers[i].weight lowercase__ : int = self.out_layers[i].bias if i == 0: lowercase__ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 ) lowercase__ : Dict = torch.cat([bias_i, 
self.cluster_bias] ,dim=0 ) weights.append(_snake_case ) biases.append(_snake_case ) lowercase__ , lowercase__ , lowercase__ : List[str] = weights[0], biases[0], self.out_projs[0] lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowercase__ : List[Any] = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Optional[Any] = [0] + self.cutoffs for i in range(len(_snake_case ) - 1 ): lowercase__ , lowercase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]] else: lowercase__ , lowercase__ , lowercase__ : List[str] = weights[i], biases[i], self.out_projs[i] lowercase__ : Any = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Any = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Any = head_logprob[:, -i] + tail_logprob_i lowercase__ : str = logprob_i return out
16
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    # Wraps a learning-rate scheduler so it only advances when the wrapped
    # optimizer(s) actually stepped.
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
313
0
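A toy sketch of the contract the accelerate scheduler wrapper above enforces: under gradient accumulation, the learning-rate scheduler must not advance on micro-steps where the optimizer step was skipped. The names here are illustrative stand-ins, not accelerate's API:

class ToyScheduler:
    def __init__(self):
        self.steps = 0

    def step(self):
        self.steps += 1


class GuardedScheduler:
    # Steps the wrapped scheduler only when gradients were actually synced.
    def __init__(self, scheduler):
        self.scheduler = scheduler

    def step(self, sync_gradients):
        if not sync_gradients:
            return  # optimizer step was skipped; keep schedules aligned
        self.scheduler.step()


sched = GuardedScheduler(ToyScheduler())
for micro_step in range(8):
    # With 4 accumulation steps, only every 4th micro-step syncs gradients.
    sched.step(sync_gradients=(micro_step + 1) % 4 == 0)

print(sched.scheduler.steps)  # 2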
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)

class __lowerCAmelCase ( __a ):
    snake_case : Dict = ["""input_values""", """padding_mask"""]

    def __init__(self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 2_4_0_0_0 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
        super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
        _UpperCAmelCase : Dict = chunk_length_s
        _UpperCAmelCase : Dict = overlap

    @property
    def snake_case_ (self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def snake_case_ (self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        if padding and truncation:
            raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
        elif padding is None:
            # by default let's pad the inputs
            _UpperCAmelCase : Optional[int] = True
        _UpperCAmelCase : Union[str, Any] = bool(
            isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            _UpperCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ).T for audio in raw_audio]
        elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
            _UpperCAmelCase : str = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
        elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
            _UpperCAmelCase : Any = raw_audio.astype(np.floataa )
        # always return batch
        if not is_batched:
            _UpperCAmelCase : Union[str, Any] = [np.asarray(lowerCAmelCase__ ).T]
        # verify inputs are valid
        for idx, example in enumerate(lowerCAmelCase__ ):
            if example.ndim > 2:
                raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
        _UpperCAmelCase : Union[str, Any] = None
        _UpperCAmelCase : int = BatchFeature({"""input_values""": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                _UpperCAmelCase : List[str] = min(array.shape[0] for array in raw_audio )
                _UpperCAmelCase : List[Any] = int(np.floor(max_length / self.chunk_stride ) )
                _UpperCAmelCase : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                _UpperCAmelCase : Union[str, Any] = max(array.shape[0] for array in raw_audio )
                _UpperCAmelCase : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
                _UpperCAmelCase : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
                _UpperCAmelCase : List[str] = """max_length"""
            else:
                _UpperCAmelCase : List[Any] = input_values
        # normal padding on batch
        if padded_inputs is None:
            _UpperCAmelCase : Any = self.pad(
                lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
            if padding:
                _UpperCAmelCase : Any = padded_inputs.pop("""attention_mask""" )
        _UpperCAmelCase : Dict = []
        for example in padded_inputs.pop("""input_values""" ):
            if self.feature_size == 1:
                _UpperCAmelCase : List[Any] = example[..., None]
            input_values.append(example.T )
        _UpperCAmelCase : List[Any] = input_values
        if return_tensors is not None:
            _UpperCAmelCase : int = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
        return padded_inputs
351
'''simple docstring'''

from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

lowerCAmelCase_ : str = logging.get_logger(__name__)

lowerCAmelCase_ : Union[str, Any] = TypeVar('''DatasetType''', Dataset, IterableDataset)

def __A ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("""Unable to interleave an empty list of datasets.""" )
    for i, dataset in enumerate(lowerCAmelCase_ ):
        if not isinstance(lowerCAmelCase_ , (Dataset, IterableDataset) ):
            if isinstance(lowerCAmelCase_ , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCAmelCase_ ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}." )
        if i == 0:
            _UpperCAmelCase , _UpperCAmelCase : Dict = (
                (Dataset, IterableDataset) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else (IterableDataset, Dataset)
            )
        elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , info=lowerCAmelCase_ , split=lowerCAmelCase_ , stopping_strategy=lowerCAmelCase_ )
    else:
        return _interleave_iterable_datasets(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , info=lowerCAmelCase_ , split=lowerCAmelCase_ , stopping_strategy=lowerCAmelCase_ )

def __A ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , ):
    if not dsets:
        raise ValueError("""Unable to concatenate an empty list of datasets.""" )
    for i, dataset in enumerate(lowerCAmelCase_ ):
        if not isinstance(lowerCAmelCase_ , (Dataset, IterableDataset) ):
            if isinstance(lowerCAmelCase_ , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCAmelCase_ ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}." )
        if i == 0:
            _UpperCAmelCase , _UpperCAmelCase : Dict = (
                (Dataset, IterableDataset) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else (IterableDataset, Dataset)
            )
        elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(lowerCAmelCase_ , info=lowerCAmelCase_ , split=lowerCAmelCase_ , axis=lowerCAmelCase_ )
    else:
        return _concatenate_iterable_datasets(lowerCAmelCase_ , info=lowerCAmelCase_ , split=lowerCAmelCase_ , axis=lowerCAmelCase_ )
170
0
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader

from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeqaSeqDataset,
    SeqaSeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)

# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa

_a = logging.getLogger(__name__)

class __lowerCamelCase ( snake_case__):
    """simple docstring"""

    UpperCamelCase__ = 'summarization'
    UpperCamelCase__ = ['loss']
    UpperCamelCase__ = ROUGE_KEYS
    UpperCamelCase__ = 'rouge2'

    def __init__( self , UpperCAmelCase , **UpperCAmelCase ):
        """simple docstring"""
        if hparams.sortish_sampler and hparams.gpus > 1:
            _UpperCAmelCase = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
        super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ )
        use_task_specific_params(self.model , 'summarization' )
        save_git_info(self.hparams.output_dir )
        _UpperCAmelCase = Path(self.output_dir ) / 'metrics.json'
        _UpperCAmelCase = Path(self.output_dir ) / 'hparams.pkl'
        pickle_save(self.hparams , self.hparams_save_path )
        _UpperCAmelCase = 0
        _UpperCAmelCase = defaultdict(lowerCAmelCase__ )
        _UpperCAmelCase = self.config.model_type
        _UpperCAmelCase = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        _UpperCAmelCase = {
            'data_dir': self.hparams.data_dir,
            'max_source_length': self.hparams.max_source_length,
            'prefix': self.model.config.prefix or '',
        }
        _UpperCAmelCase = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        _UpperCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        _UpperCAmelCase = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        _UpperCAmelCase = get_git_info()['repo_sha']
        _UpperCAmelCase = hparams.num_workers
        _UpperCAmelCase = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ):
            _UpperCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            _UpperCAmelCase = self.decoder_start_token_id
        _UpperCAmelCase = (
            SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
        )
        _UpperCAmelCase = False
        _UpperCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            _UpperCAmelCase = self.hparams.eval_max_gen_length
        else:
            _UpperCAmelCase = self.model.config.max_length
        _UpperCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = {
            k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(lowerCAmelCase__ , Path(self.output_dir ) / 'text_batch.json' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
        _UpperCAmelCase = True
        return readable_batch

    def UpperCamelCase ( self , UpperCAmelCase , **UpperCAmelCase ):
        """simple docstring"""
        return self.model(lowerCAmelCase__ , **lowerCAmelCase__ )

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = self.tokenizer.batch_decode(
            lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
        return lmap(str.strip , lowerCAmelCase__ )

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = self.tokenizer.pad_token_id
        _UpperCAmelCase , _UpperCAmelCase = batch['input_ids'], batch['attention_mask']
        _UpperCAmelCase = batch['labels']
        if isinstance(self.model , lowerCAmelCase__ ):
            _UpperCAmelCase = self.model._shift_right(lowerCAmelCase__ )
        else:
            _UpperCAmelCase = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ )
        if not self.already_saved_batch:
            # This would be slightly better if it only happened on rank zero
            _UpperCAmelCase = decoder_input_ids
            self.save_readable_batch(lowerCAmelCase__ )
        _UpperCAmelCase = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
        _UpperCAmelCase = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            _UpperCAmelCase = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ )
            assert lm_logits.shape[-1] == self.vocab_size
            _UpperCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            _UpperCAmelCase = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
            _UpperCAmelCase , _UpperCAmelCase = label_smoothed_nll_loss(
                lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ )
        return (loss,)

    @property
    def UpperCamelCase ( self ):
        """simple docstring"""
        return self.tokenizer.pad_token_id

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = self._step(lowerCAmelCase__ )
        _UpperCAmelCase = dict(zip(self.loss_names , lowerCAmelCase__ ) )
        # tokens per batch
        _UpperCAmelCase = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
        _UpperCAmelCase = batch['input_ids'].shape[0]
        _UpperCAmelCase = batch['input_ids'].eq(self.pad ).sum()
        _UpperCAmelCase = batch['input_ids'].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        return self._generative_step(lowerCAmelCase__ )

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="val" ):
        """simple docstring"""
        self.step_count += 1
        _UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        _UpperCAmelCase = losses['loss']
        _UpperCAmelCase = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        _UpperCAmelCase = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        _UpperCAmelCase = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(lowerCAmelCase__ )
        _UpperCAmelCase = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        _UpperCAmelCase = self.step_count
        self.metrics[prefix].append(lowerCAmelCase__ )  # callback writes this to self.metrics_save_path
        _UpperCAmelCase = flatten_list([x['preds'] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F"""{prefix}_loss""": loss,
            F"""{prefix}_{self.val_metric}""": metric_tensor,
        }

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ )

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        _UpperCAmelCase = self.model.generate(
            batch['input_ids'] ,
            attention_mask=batch['attention_mask'] ,
            use_cache=lowerCAmelCase__ ,
            decoder_start_token_id=self.decoder_start_token_id ,
            num_beams=self.eval_beams ,
            max_length=self.eval_max_length ,
        )
        _UpperCAmelCase = (time.time() - ta) / batch['input_ids'].shape[0]
        _UpperCAmelCase = self.ids_to_clean_text(lowerCAmelCase__ )
        _UpperCAmelCase = self.ids_to_clean_text(batch['labels'] )
        _UpperCAmelCase = self._step(lowerCAmelCase__ )
        _UpperCAmelCase = dict(zip(self.loss_names , lowerCAmelCase__ ) )
        _UpperCAmelCase = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCAmelCase = np.mean(lmap(lowerCAmelCase__ , lowerCAmelCase__ ) )
        base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ )
        return base_metrics

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        return self._generative_step(lowerCAmelCase__ )

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        return self.validation_epoch_end(lowerCAmelCase__ , prefix='test' )

    def UpperCamelCase ( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = self.n_obs[type_path]
        _UpperCAmelCase = self.target_lens[type_path]
        _UpperCAmelCase = self.dataset_class(
            self.tokenizer ,
            type_path=lowerCAmelCase__ ,
            n_obs=lowerCAmelCase__ ,
            max_target_length=lowerCAmelCase__ ,
            **self.dataset_kwargs ,
        )
        return dataset

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ):
        """simple docstring"""
        _UpperCAmelCase = self.get_dataset(lowerCAmelCase__ )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            _UpperCAmelCase = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCAmelCase__ ,
                batch_size=lowerCAmelCase__ ,
                collate_fn=dataset.collate_fn ,
                shuffle=lowerCAmelCase__ ,
                num_workers=self.num_workers ,
                sampler=lowerCAmelCase__ ,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            _UpperCAmelCase = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCAmelCase__ ,
                batch_sampler=lowerCAmelCase__ ,
                collate_fn=dataset.collate_fn ,
                num_workers=self.num_workers ,
            )
        else:
            return DataLoader(
                lowerCAmelCase__ ,
                batch_size=lowerCAmelCase__ ,
                collate_fn=dataset.collate_fn ,
                shuffle=lowerCAmelCase__ ,
                num_workers=self.num_workers ,
                sampler=lowerCAmelCase__ ,
            )

    def UpperCamelCase ( self ):
        """simple docstring"""
        _UpperCAmelCase = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ )
        return dataloader

    def UpperCamelCase ( self ):
        """simple docstring"""
        return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )

    def UpperCamelCase ( self ):
        """simple docstring"""
        return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )

    @staticmethod
    def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
        add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ )
        parser.add_argument(
            '--max_source_length' ,
            default=1024 ,
            type=lowerCAmelCase__ ,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,
        )
        parser.add_argument(
            '--max_target_length' ,
            default=56 ,
            type=lowerCAmelCase__ ,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,
        )
        parser.add_argument(
            '--val_max_target_length' ,
            default=142 ,
            type=lowerCAmelCase__ ,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,
        )
        parser.add_argument(
            '--test_max_target_length' ,
            default=142 ,
            type=lowerCAmelCase__ ,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,
        )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=lowerCAmelCase__ )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowerCAmelCase__ )
        parser.add_argument('--max_tokens_per_batch' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
        parser.add_argument('--logger_name' , type=lowerCAmelCase__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=lowerCAmelCase__ , default=500 , required=lowerCAmelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=lowerCAmelCase__ , default='summarization' , required=lowerCAmelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ )
        parser.add_argument('--src_lang' , type=lowerCAmelCase__ , default='' , required=lowerCAmelCase__ )
        parser.add_argument('--tgt_lang' , type=lowerCAmelCase__ , default='' , required=lowerCAmelCase__ )
        parser.add_argument('--eval_beams' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
        parser.add_argument(
            '--val_metric' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' ,
            type=lowerCAmelCase__ ,
            default=-1 ,
            required=lowerCAmelCase__ ,
            help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
            ) ,
        )
        return parser

class __lowerCamelCase ( snake_case__):
    """simple docstring"""

    UpperCamelCase__ = 'translation'
    UpperCamelCase__ = ['loss']
    UpperCamelCase__ = ['bleu']
    UpperCamelCase__ = 'bleu'

    def __init__( self , UpperCAmelCase , **UpperCAmelCase ):
        """simple docstring"""
        super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ )
        _UpperCAmelCase = hparams.src_lang
        _UpperCAmelCase = hparams.tgt_lang

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ )

def __A ( __lowerCAmelCase , __lowerCAmelCase=None )-> SummarizationModule:
    """simple docstring"""
    Path(args.output_dir ).mkdir(exist_ok=__lowerCAmelCase )
    check_output_dir(__lowerCAmelCase , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            _UpperCAmelCase = SummarizationModule(__lowerCAmelCase )
        else:
            _UpperCAmelCase = TranslationModule(__lowerCAmelCase )
    _UpperCAmelCase = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        _UpperCAmelCase = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        _UpperCAmelCase = os.environ.get('WANDB_PROJECT' , __lowerCAmelCase )
        _UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=__lowerCAmelCase )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        _UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        _UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        _UpperCAmelCase = False
    _UpperCAmelCase = args.val_metric == 'loss'
    _UpperCAmelCase = generic_train(
        __lowerCAmelCase ,
        __lowerCAmelCase ,
        logging_callback=SeqaSeqLoggingCallback() ,
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , __lowerCAmelCase ) ,
        early_stopping_callback=__lowerCAmelCase ,
        logger=__lowerCAmelCase ,
    )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    _UpperCAmelCase = ''
    _UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=__lowerCAmelCase ) )
    if checkpoints:
        _UpperCAmelCase = checkpoints[-1]
        _UpperCAmelCase = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model

if __name__ == "__main__":
    _a = argparse.ArgumentParser()
    _a = pl.Trainer.add_argparse_args(parser)
    _a = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    _a = parser.parse_args()

    main(args)
39
'''simple docstring'''

import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask

if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )

class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_attention_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = num_choices

    def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _UpperCamelCase = None
        if self.use_attention_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _UpperCamelCase = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def snake_case__ ( self : Union[str, Any] ) -> str:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
        _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    _snake_case : Dict = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def snake_case__ ( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        _UpperCamelCase = FlaxAlbertModelTester(self )

    @slow
    def snake_case__ ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            _UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' )
            _UpperCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCAmelCase__ )

@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        _UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        _UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
        _UpperCamelCase = (1, 11, 768)
        self.assertEqual(output.shape , lowerCAmelCase__ )
        _UpperCamelCase = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
324
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A : str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys A : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
357
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests

def __lowerCamelCase ( __a :str ) -> Optional[int]:
    """simple docstring"""
    A__ = {}
    A__ = job["""started_at"""]
    A__ = job["""completed_at"""]
    A__ = date_parser.parse(__a )
    A__ = date_parser.parse(__a )
    A__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    A__ = start
    A__ = end
    A__ = duration_in_min
    return job_info

def __lowerCamelCase ( __a :Optional[Any] , __a :List[str]=None ) -> List[Any]:
    """simple docstring"""
    A__ = None
    if token is not None:
        A__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'Bearer {token}'}
    A__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    A__ = requests.get(__a , headers=__a ).json()
    A__ = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
        A__ = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
        for i in range(__a ):
            A__ = requests.get(url + F'&page={i + 2}' , headers=__a ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
        return job_time
    except Exception:
        print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
    return {}

if __name__ == "__main__":
    A : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    A : Dict = parser.parse_args()

    A : List[Any] = get_job_time(args.workflow_run_id)
    A : int = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(F'''{k}: {v["duration"]}''')
276
0
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Dict =DownBlockaD # noqa F405 __UpperCAmelCase : List[str] ='''down''' def snake_case ( self ): __lowerCAmelCase = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =ResnetDownsampleBlockaD # noqa F405 __UpperCAmelCase : int ='''down''' def snake_case ( self ): __lowerCAmelCase = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[int] =AttnDownBlockaD # noqa F405 __UpperCAmelCase : Union[str, Any] ='''down''' def snake_case ( self ): __lowerCAmelCase = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str =CrossAttnDownBlockaD # noqa F405 __UpperCAmelCase : List[str] ='''down''' def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[int] =SimpleCrossAttnDownBlockaD # noqa F405 __UpperCAmelCase : str ='''down''' @property def snake_case ( self ): return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def snake_case ( self ): __lowerCAmelCase = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[str] =SkipDownBlockaD # noqa F405 __UpperCAmelCase : int ='''down''' @property def snake_case ( self ): return super().get_dummy_input(include_skip_sample=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] =AttnSkipDownBlockaD # noqa F405 __UpperCAmelCase : str ='''down''' @property def snake_case ( self ): return super().get_dummy_input(include_skip_sample=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase 
): '''simple docstring''' __UpperCAmelCase : Optional[Any] =DownEncoderBlockaD # noqa F405 __UpperCAmelCase : Optional[Any] ='''down''' @property def snake_case ( self ): return super().get_dummy_input(include_temb=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = { "in_channels": 32, "out_channels": 32, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] =AttnDownEncoderBlockaD # noqa F405 __UpperCAmelCase : Dict ='''down''' @property def snake_case ( self ): return super().get_dummy_input(include_temb=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = { "in_channels": 32, "out_channels": 32, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] =UNetMidBlockaD # noqa F405 __UpperCAmelCase : Union[str, Any] ='''mid''' def snake_case ( self ): __lowerCAmelCase = { "in_channels": 32, "temb_channels": 1_28, } __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] =UNetMidBlockaDCrossAttn # noqa F405 __UpperCAmelCase : List[Any] ='''mid''' def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =UNetMidBlockaDSimpleCrossAttn # noqa F405 __UpperCAmelCase : List[str] ='''mid''' @property def snake_case ( self ): return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[int] =UpBlockaD # noqa F405 __UpperCAmelCase : int ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Any =ResnetUpsampleBlockaD # noqa F405 __UpperCAmelCase : 
List[str] ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[str] =CrossAttnUpBlockaD # noqa F405 __UpperCAmelCase : Tuple ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Tuple =SimpleCrossAttnUpBlockaD # noqa F405 __UpperCAmelCase : Dict ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ , include_encoder_hidden_states=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = super().prepare_init_args_and_inputs_for_common() __lowerCAmelCase = 32 return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =AttnUpBlockaD # noqa F405 __UpperCAmelCase : List[str] ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def snake_case ( self ): __lowerCAmelCase = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Tuple =SkipUpBlockaD # noqa F405 __UpperCAmelCase : str ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[Any] =AttnSkipUpBlockaD # noqa F405 __UpperCAmelCase : List[Any] ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str =UpDecoderBlockaD # noqa F405 __UpperCAmelCase : List[str] ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_temb=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = {"in_channels": 32, "out_channels": 32} __lowerCAmelCase = 
self.dummy_input return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7] super().test_output(UpperCAmelCase__ ) class _UpperCamelCase ( __lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Dict =AttnUpDecoderBlockaD # noqa F405 __UpperCAmelCase : Tuple ='''up''' @property def snake_case ( self ): return super().get_dummy_input(include_temb=UpperCAmelCase__ ) def snake_case ( self ): __lowerCAmelCase = {"in_channels": 32, "out_channels": 32} __lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): __lowerCAmelCase = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8] super().test_output(UpperCAmelCase__ )
57
'''simple docstring'''

import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()

@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : Tuple ) -> Any:
        lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('sample_euler' )
        lowerCAmelCase = 'A painting of a squirrel eating a burger'
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        lowerCAmelCase = output.images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def __UpperCAmelCase ( self : List[str] ) -> Dict:
        lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('sample_euler' )
        lowerCAmelCase = 'A painting of a squirrel eating a burger'
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        lowerCAmelCase = output.images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1

    def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
        lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        lowerCAmelCase = 'A painting of a squirrel eating a burger'
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = sd_pipe(
            [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
        lowerCAmelCase = output.images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase = np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
4
0
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin

@require_tokenizers
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase__ = None
    lowerCamelCase__ = BloomTokenizerFast
    lowerCamelCase__ = BloomTokenizerFast
    lowerCamelCase__ = True
    lowerCamelCase__ = False
    lowerCamelCase__ = '''tokenizer_file'''
    lowerCamelCase__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}

    def __A ( self : str ) -> Tuple:
        super().setUp()
        SCREAMING_SNAKE_CASE_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )

    def __A ( self : List[str] , **__magic_name__ : int ) -> List[Any]:
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )

    def __A ( self : Any ) -> int:
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        SCREAMING_SNAKE_CASE_ = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_encode_plus(__magic_name__ )["input_ids"]
        self.assertListEqual(__magic_name__ , __magic_name__ )
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )

    def __A ( self : Union[str, Any] , __magic_name__ : Tuple=6 ) -> Any:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                SCREAMING_SNAKE_CASE_ = "This is a simple input"
                SCREAMING_SNAKE_CASE_ = ["This is a simple input 1", "This is a simple input 2"]
                SCREAMING_SNAKE_CASE_ = ("This is a simple input", "This is a pair")
                SCREAMING_SNAKE_CASE_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(__magic_name__ , max_length=__magic_name__ )
                    tokenizer_r.encode_plus(__magic_name__ , max_length=__magic_name__ )
                    tokenizer_r.batch_encode_plus(__magic_name__ , max_length=__magic_name__ )
                    tokenizer_r.encode(__magic_name__ , max_length=__magic_name__ )
                    tokenizer_r.batch_encode_plus(__magic_name__ , max_length=__magic_name__ )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                SCREAMING_SNAKE_CASE_ = None  # Hotfixing padding = None
                self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="max_length" )  # Simple input
                self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="max_length" )  # Simple input
                self.assertRaises(
                    __magic_name__ ,
                    tokenizer_r.batch_encode_plus ,
                    __magic_name__ ,
                    max_length=__magic_name__ ,
                    padding="max_length" ,
                )  # Pair input
                self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="max_length" )  # Pair input
                self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="max_length" )  # Pair input
                self.assertRaises(
                    __magic_name__ ,
                    tokenizer_r.batch_encode_plus ,
                    __magic_name__ ,
                    max_length=__magic_name__ ,
                    padding="max_length" ,
                )

    def __A ( self : Optional[Any] ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=__magic_name__ )
        SCREAMING_SNAKE_CASE_ = next(iter(__magic_name__ ) )["premise"]  # pick up one data
        SCREAMING_SNAKE_CASE_ = list(sample_data.values() )
        SCREAMING_SNAKE_CASE_ = list(map(tokenizer.encode , __magic_name__ ) )
        SCREAMING_SNAKE_CASE_ = [tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) for x in output_tokens]
        self.assertListEqual(__magic_name__ , __magic_name__ )

    def __A ( self : List[str] ) -> Tuple:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
305
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate

@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""

    lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
    lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
    lowerCamelCase__ = "text"
    lowerCamelCase__ = "summary"

    @property
    def __A ( self : Dict ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
305
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def __SCREAMING_SNAKE_CASE ( A_="ro" , A_="en" , A_="wmt16" , A_=None ): try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''' ) lowerCAmelCase__ : Any = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) lowerCAmelCase__ : Optional[Any] = datasets.load_dataset(A_ , A_ ) if save_dir is None: lowerCAmelCase__ : List[Any] = f'{dataset}-{pair}' lowerCAmelCase__ : str = Path(A_ ) save_dir.mkdir(exist_ok=A_ ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets lowerCAmelCase__ : Union[str, Any] = '''val''' if split == '''validation''' else split lowerCAmelCase__ : List[str] = save_dir.joinpath(f'{fn}.source' ) lowerCAmelCase__ : Optional[Any] = save_dir.joinpath(f'{fn}.target' ) lowerCAmelCase__ : Optional[Any] = src_path.open('''w+''' ) lowerCAmelCase__ : Optional[int] = tgt_path.open('''w+''' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): lowerCAmelCase__ : Dict = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''' ) tgt_fp.write(ex[tgt_lang] + '''\n''' ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
106
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __UpperCamelCase : Tuple = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') __UpperCamelCase : Optional[int] = F'''https://www.google.com/search?q={query}&num=100''' __UpperCamelCase : Optional[Any] = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: __UpperCamelCase : Union[str, Any] = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: __UpperCamelCase : str = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
106
1
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()

class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def lowerCamelCase_ ( self : List[Any] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCamelCase_ ( self : List[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        UpperCamelCase = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model

    @property
    def lowerCamelCase_ ( self : str ):
        """simple docstring"""
        torch.manual_seed(0 )
        UpperCamelCase = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
        return model

    @property
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        UpperCamelCase = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        UpperCamelCase = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet

    @slow
    def lowerCamelCase_ ( self : List[Any] ):
        """simple docstring"""
        UpperCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        UpperCamelCase = DDPMScheduler()
        UpperCamelCase = AudioDiffusionPipeline(vqvae=lowerCamelCase_ , unet=self.dummy_unet , mel=lowerCamelCase_ , scheduler=lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(42 )
        UpperCamelCase = pipe(generator=lowerCamelCase_ , steps=4 )
        UpperCamelCase = output.audios[0]
        UpperCamelCase = output.images[0]
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(42 )
        UpperCamelCase = pipe(generator=lowerCamelCase_ , steps=4 , return_dict=lowerCamelCase_ )
        UpperCamelCase = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
        UpperCamelCase = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        UpperCamelCase = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        UpperCamelCase = DDIMScheduler()
        UpperCamelCase = self.dummy_vqvae_and_unet
        UpperCamelCase = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase_ , scheduler=lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        np.random.seed(0 )
        UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(42 )
        UpperCamelCase = pipe(raw_audio=lowerCamelCase_ , generator=lowerCamelCase_ , start_step=5 , steps=10 )
        UpperCamelCase = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        UpperCamelCase = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        UpperCamelCase = self.dummy_unet_condition
        UpperCamelCase = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase_ , mel=lowerCamelCase_ , scheduler=lowerCamelCase_ )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        np.random.seed(0 )
        UpperCamelCase = torch.rand((1, 1, 10) )
        UpperCamelCase = pipe(generator=lowerCamelCase_ , encoding=lowerCamelCase_ )
        UpperCamelCase = output.images[0]
        UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        UpperCamelCase = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def lowerCamelCase_ ( self : List[Any] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self : Optional[Any] ):
        """simple docstring"""
        UpperCamelCase = torch_device
        UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
        UpperCamelCase = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(42 )
        UpperCamelCase = pipe(generator=lowerCamelCase_ )
        UpperCamelCase = output.audios[0]
        UpperCamelCase = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        UpperCamelCase = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
165
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

_SCREAMING_SNAKE_CASE = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

_SCREAMING_SNAKE_CASE = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'''{len(upper_files)} files contain uppercase characters:''')
    print("""\n""".join(upper_files) + """\n""")

_SCREAMING_SNAKE_CASE = [file for file in filepaths if """ """ in file]
if space_files:
    print(F'''{len(space_files)} files contain space characters:''')
    print("""\n""".join(space_files) + """\n""")

_SCREAMING_SNAKE_CASE = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(F'''{len(hyphen_files)} files contain hyphen characters:''')
    print("""\n""".join(hyphen_files) + """\n""")

_SCREAMING_SNAKE_CASE = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'''{len(nodir_files)} files are not in a directory:''')
    print("""\n""".join(nodir_files) + """\n""")

_SCREAMING_SNAKE_CASE = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
165
1
from __future__ import annotations

_snake_case : Any = []


def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
    for i in range(len(__lowerCamelCase ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(__lowerCamelCase ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(__lowerCamelCase , -1 , -1 ) , range(__lowerCamelCase , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(__lowerCamelCase , -1 , -1 ) , range(__lowerCamelCase , len(__lowerCamelCase ) ) ):
        if board[i][j] == 1:
            return False
    return True


def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    if row >= len(__lowerCamelCase ):
        solution.append(__lowerCamelCase )
        printboard(__lowerCamelCase )
        print()
        return True
    for i in range(len(__lowerCamelCase ) ):
        if is_safe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
            __snake_case : Optional[Any] = 1
            solve(__lowerCamelCase , row + 1 )
            __snake_case : Union[str, Any] = 0
    return False


def lowerCAmelCase_ ( __lowerCamelCase ):
    for i in range(len(__lowerCamelCase ) ):
        for j in range(len(__lowerCamelCase ) ):
            if board[i][j] == 1:
                print("Q" , end=" " )
            else:
                print("." , end=" " )
        print()


# n=int(input("The no. of queens"))
_snake_case : Union[str, Any] = 8
_snake_case : List[Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
123
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


_snake_case : str = logging.get_logger(__name__)  # pylint: disable=invalid-name


class a (_lowerCAmelCase ):
    """simple docstring"""

    def __init__( self : List[str] , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Dict:
        super().__init__()
        self.register_modules(
            vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )

    def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case : Tuple = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase )

    def __snake_case ( self : str ) -> List[str]:
        self.enable_attention_slicing(lowerCamelCase )

    @torch.no_grad()
    def __call__( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , lowerCamelCase : Optional[torch.FloatTensor] = None , **lowerCamelCase : Any , ) -> Optional[Any]:
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __snake_case : Optional[int] = 1
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            __snake_case : Tuple = len(lowerCamelCase )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(lowerCamelCase )}.' )

        # get prompt text embeddings
        __snake_case : Tuple = self.tokenizer(
            lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        __snake_case : Optional[Any] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            __snake_case : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            __snake_case : str = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            __snake_case : str = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        __snake_case , __snake_case , __snake_case : int = text_embeddings.shape
        __snake_case : Any = text_embeddings.repeat(1 , lowerCamelCase , 1 )
        __snake_case : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        __snake_case : List[str] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            __snake_case : List[str]
            if negative_prompt is None:
                __snake_case : Any = [""]
            elif type(lowerCamelCase ) is not type(lowerCamelCase ):
                raise TypeError(
                    F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='
                    F' {type(lowerCamelCase )}.' )
            elif isinstance(lowerCamelCase , lowerCamelCase ):
                __snake_case : int = [negative_prompt]
            elif batch_size != len(lowerCamelCase ):
                raise ValueError(
                    F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'
                    F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    " the batch size of `prompt`." )
            else:
                __snake_case : Tuple = negative_prompt

            __snake_case : str = text_input_ids.shape[-1]
            __snake_case : Dict = self.tokenizer(
                lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
            __snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            __snake_case : Tuple = uncond_embeddings.shape[1]
            __snake_case : Any = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
            __snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __snake_case : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        __snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        __snake_case : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        __snake_case : Optional[int] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                __snake_case : Optional[Any] = torch.randn(
                    lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
                __snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
                    self.device )
            else:
                __snake_case : Union[str, Any] = torch.randn(
                    lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
                __snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            __snake_case : Union[str, Any] = latents_reference.to(self.device )
            __snake_case : List[str] = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        __snake_case : Union[str, Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
        __snake_case : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
        __snake_case : str = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        __snake_case : List[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        __snake_case : Tuple = 0 if dx < 0 else dx
        __snake_case : Union[str, Any] = 0 if dy < 0 else dy
        __snake_case : Any = max(-dx , 0 )
        __snake_case : Optional[int] = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        __snake_case : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(lowerCamelCase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        __snake_case : Dict = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        __snake_case : Any = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __snake_case : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __snake_case : Optional[Any] = {}
        if accepts_eta:
            __snake_case : List[Any] = eta

        for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            __snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __snake_case : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )

            # predict the noise residual
            __snake_case : str = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                __snake_case , __snake_case : str = noise_pred.chunk(2 )
                __snake_case : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            __snake_case : Optional[Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )

        __snake_case : List[Any] = 1 / 0.1_82_15 * latents
        __snake_case : Dict = self.vae.decode(lowerCamelCase ).sample

        __snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            __snake_case : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
                self.device )
            __snake_case , __snake_case : str = self.safety_checker(
                images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            __snake_case : Dict = None

        if output_type == "pil":
            __snake_case : Any = self.numpy_to_pil(lowerCamelCase )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
123
1
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class _UpperCAmelCase : def __init__( self : Dict , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Dict=32 , _lowercase : Any=2 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=16 , _lowercase : Optional[int]=[1, 2, 1] , _lowercase : int=[2, 2, 4] , _lowercase : Optional[Any]=2 , _lowercase : Union[str, Any]=2.0 , _lowercase : Any=True , _lowercase : Optional[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.1 , _lowercase : str="gelu" , _lowercase : List[Any]=False , _lowercase : List[Any]=True , _lowercase : Optional[Any]=0.02 , _lowercase : str=1E-5 , _lowercase : str=True , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Any=10 , _lowercase : int=8 , _lowercase : Optional[Any]=["stage1", "stage2", "stage3"] , _lowercase : Optional[Any]=[1, 2, 3] , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = patch_norm __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = is_training __UpperCAmelCase = scope __UpperCAmelCase = use_labels __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = encoder_stride __UpperCAmelCase = out_features __UpperCAmelCase = out_indices def a ( self : int ): __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def a ( self : Dict ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int ): __UpperCAmelCase = 
MaskFormerSwinModel(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase ) __UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a ( self : int , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Dict ): __UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_lowercase ): __UpperCAmelCase = ['''stem'''] __UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} a__ : List[str] = False a__ : int = False a__ : str = False a__ : str = False a__ : Any = False def a ( self : Optional[Any] ): __UpperCAmelCase = MaskFormerSwinModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with''' ''' `nn.DataParallel`''' ) ) def a ( self : int ): pass def a ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a ( self : str ): return def a ( self : Optional[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowercase ) @unittest.skip('''Swin does not use inputs_embeds''' ) def a ( self : List[Any] ): pass @unittest.skip('''Swin does not support feedforward chunking''' ) def a ( self : str ): pass def a ( self : Union[str, Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) ) def a ( self : Union[str, Any] ): __UpperCAmelCase , __UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowercase ) @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' ) def a ( self : Optional[Any] ): pass @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' ) def a ( self : Optional[Any] ): pass def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict , _lowercase : Tuple ): __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowercase ) , _lowercase ) # Swin has a different seq_length __UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a ( self : str ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = 3 __UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) ) @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' ) def a ( self : Any ): pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def a ( self : str ): pass @unittest.skip(reason='''This will be fixed 
once MaskFormerSwin is replaced by native Swin''' ) def a ( self : Tuple ): pass def a ( self : Tuple ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_lowercase : List[str] ): __UpperCAmelCase = 0 return t def check_equivalence(_lowercase : List[Any] , _lowercase : Any , _lowercase : str , _lowercase : List[str]={} ): with torch.no_grad(): __UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ) __UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple() def recursive_check(_lowercase : Dict , _lowercase : Optional[Any] ): if isinstance(_lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ): recursive_check(_lowercase , _lowercase ) elif isinstance(_lowercase , _lowercase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_lowercase , _lowercase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=( '''Tuple and dict output are not equal. Difference:''' F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has''' F''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.''' ) , ) recursive_check(_lowercase , _lowercase ) for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} ) @require_torch class _UpperCAmelCase ( unittest.TestCase , _lowerCAmelCase ): a__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ : List[str] = MaskFormerSwinConfig def a ( self : List[str] ): __UpperCAmelCase = MaskFormerSwinModelTester(self ) def a ( self : List[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: __UpperCAmelCase = backbone_class(_lowercase ) backbone.to(_lowercase ) backbone.eval() __UpperCAmelCase = backbone(**_lowercase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _lowercase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] 
, (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __UpperCAmelCase = backbone(**_lowercase , output_hidden_states=_lowercase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __UpperCAmelCase = backbone(**_lowercase , output_attentions=_lowercase ) self.assertIsNotNone(outputs.attentions )
86
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": _lowercase : int = pd.read_csv('sample_data.csv', header=None) _lowercase : str = df.shape[:1][0] # If you're using some other dataset input the target column _lowercase : Optional[int] = df.iloc[:, 1:2] _lowercase : Optional[int] = actual_data.values.reshape(len_data, 1) _lowercase : Any = MinMaxScaler().fit_transform(actual_data) _lowercase : Dict = 10 _lowercase : List[str] = 5 _lowercase : Any = 20 _lowercase : Optional[int] = len_data - periods * look_back _lowercase : Optional[int] = actual_data[:division] _lowercase : Optional[int] = actual_data[division - look_back :] _lowercase ,_lowercase : Tuple = [], [] _lowercase ,_lowercase : Optional[Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) _lowercase : List[str] = np.array(train_x) _lowercase : str = np.array(test_x) _lowercase : Union[str, Any] = np.array([list(i.ravel()) for i in train_y]) _lowercase : List[Any] = np.array([list(i.ravel()) for i in test_y]) _lowercase : str = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') _lowercase : str = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) _lowercase : str = model.predict(x_test)
86
1
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" A__ = [] for _ in range(UpperCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" A__ = [] for step in range(UpperCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(UpperCamelCase_ , 'schedule.bin' ) torch.save(scheduler.state_dict() , UpperCamelCase_ ) A__ = torch.load(UpperCamelCase_ ) scheduler.load_state_dict(UpperCamelCase_ ) return lrs @require_torch class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> int: self.assertEqual(len(UpperCAmelCase__ ) ,len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ ,UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,delta=UpperCAmelCase__ ) def snake_case__ ( self ) -> str: A__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCAmelCase__ ) A__ = torch.tensor([0.4, 0.2, -0.5] ) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = AdamW(params=[w] ,lr=2e-1 ,weight_decay=0.0 ) for _ in range(1_00 ): A__ = criterion(UpperCAmelCase__ ,UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 ) def snake_case__ ( self ) -> str: A__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCAmelCase__ ) A__ = torch.tensor([0.4, 0.2, -0.5] ) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = Adafactor( params=[w] ,lr=1e-2 ,eps=(1e-30, 1e-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=UpperCAmelCase__ ,weight_decay=0.0 ,relative_step=UpperCAmelCase__ ,scale_parameter=UpperCAmelCase__ ,warmup_init=UpperCAmelCase__ ,) for _ in range(10_00 ): A__ = criterion(UpperCAmelCase__ ,UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 ) @require_torch class UpperCamelCase__( unittest.TestCase ): lowerCAmelCase__ : Any = nn.Linear(50 , 50 ) if is_torch_available() else None lowerCAmelCase__ : Optional[Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None lowerCAmelCase__ : Tuple = 10 def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Optional[int]: self.assertEqual(len(UpperCAmelCase__ ) ,len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ ,UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,delta=UpperCAmelCase__ ,msg=UpperCAmelCase__ ) def snake_case__ ( self ) -> Optional[int]: A__ = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): A__ , A__ = data A__ = scheduler_func(self.optimizer ,**UpperCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 ) A__ = unwrap_schedule(UpperCAmelCase__ ,self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase__ ,UpperCAmelCase__ ,tol=1e-2 ,msg=f'''failed for {scheduler_func} in normal scheduler''' ,) A__ = scheduler_func(self.optimizer ,**UpperCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase__ ) # wrap to test picklability of the schedule A__ = unwrap_and_save_reload_schedule(UpperCAmelCase__ ,self.num_steps ) self.assertListEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,msg=f'''failed for {scheduler_func} in save and reload''' ) class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ) -> List[str]: A__ = fn def __call__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: return self.fn(*UpperCAmelCase__ ,**UpperCAmelCase__ ) @classmethod def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]: A__ = list(map(self ,scheduler.lr_lambdas ) )
221
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ): __lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) ) __lowercase = torch.manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowercase ( self : Optional[Any] ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : int ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : str ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) 
__lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Dict ): __lowercase = ort.SessionOptions() __lowercase = False return options def _lowercase ( self : Dict ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowercase ( self : str ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) __lowercase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] 
) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
17
0
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase__ ): '''simple docstring''' __UpperCamelCase = '''AutoTokenizer''' __UpperCamelCase = ['''tokenizer'''] __UpperCamelCase = { '''semantic_prompt''': 1, '''coarse_prompt''': 2, '''fine_prompt''': 2, } def __init__( self : Dict , lowercase_ : Any , lowercase_ : Optional[int]=None): '''simple docstring''' super().__init__(_a) SCREAMING_SNAKE_CASE_ : Dict = speaker_embeddings @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple , lowercase_ : Any , lowercase_ : Dict="speaker_embeddings_path.json" , **lowercase_ : Any): '''simple docstring''' if speaker_embeddings_dict_path is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = get_file_from_repo( _a , _a , subfolder=kwargs.pop('''subfolder''' , _a) , cache_dir=kwargs.pop('''cache_dir''' , _a) , force_download=kwargs.pop('''force_download''' , _a) , proxies=kwargs.pop('''proxies''' , _a) , resume_download=kwargs.pop('''resume_download''' , _a) , local_files_only=kwargs.pop('''local_files_only''' , _a) , use_auth_token=kwargs.pop('''use_auth_token''' , _a) , revision=kwargs.pop('''revision''' , _a) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(_a , _a)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.') SCREAMING_SNAKE_CASE_ : Tuple = None else: with open(_a) as speaker_embeddings_json: SCREAMING_SNAKE_CASE_ : Tuple = json.load(_a) else: SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(_a , **_a) return cls(tokenizer=_a , speaker_embeddings=_a) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Tuple , lowercase_ : List[str]="speaker_embeddings_path.json" , lowercase_ : Tuple="speaker_embeddings" , lowercase_ : bool = False , **lowercase_ : List[Any] , ): '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(_a , _a , '''v2''') , exist_ok=_a) SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : int = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": SCREAMING_SNAKE_CASE_ : Tuple = self._load_voice_preset(_a) SCREAMING_SNAKE_CASE_ : List[Any] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , _a , F'{prompt_key}_{key}') , voice_preset[key] , allow_pickle=_a , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_a , F'{prompt_key}_{key}.npy') SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_dict with open(os.path.join(_a , _a) , '''w''') as fp: json.dump(_a , _a) super().save_pretrained(_a , _a , **_a) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : str = None , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.speaker_embeddings[voice_preset] SCREAMING_SNAKE_CASE_ : int = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].') SCREAMING_SNAKE_CASE_ : Optional[Any] = get_file_from_repo( 
self.speaker_embeddings.get('''repo_or_path''' , '''/''') , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _a) , cache_dir=kwargs.pop('''cache_dir''' , _a) , force_download=kwargs.pop('''force_download''' , _a) , proxies=kwargs.pop('''proxies''' , _a) , resume_download=kwargs.pop('''resume_download''' , _a) , local_files_only=kwargs.pop('''local_files_only''' , _a) , use_auth_token=kwargs.pop('''use_auth_token''' , _a) , revision=kwargs.pop('''revision''' , _a) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/") , voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.') SCREAMING_SNAKE_CASE_ : int = np.load(_a) return voice_preset_dict def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[dict] = None): '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.') if not isinstance(voice_preset[key] , np.ndarray): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') if len(voice_preset[key].shape) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') def __call__( self : int , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]="pt" , lowercase_ : List[Any]=256 , lowercase_ : List[Any]=False , lowercase_ : str=True , lowercase_ : Optional[Any]=False , **lowercase_ : str , ): '''simple docstring''' if voice_preset is not None and not isinstance(_a , _a): if ( isinstance(_a , _a) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): SCREAMING_SNAKE_CASE_ : Tuple = self._load_voice_preset(_a) else: if isinstance(_a , _a) and not voice_preset.endswith('''.npz'''): SCREAMING_SNAKE_CASE_ : int = voice_preset + '.npz' SCREAMING_SNAKE_CASE_ : Dict = np.load(_a) if voice_preset is not None: self._validate_voice_preset_dict(_a , **_a) SCREAMING_SNAKE_CASE_ : List[str] = BatchFeature(data=_a , tensor_type=_a) SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer( _a , return_tensors=_a , padding='''max_length''' , max_length=_a , return_attention_mask=_a , return_token_type_ids=_a , add_special_tokens=_a , **_a , ) if voice_preset is not None: SCREAMING_SNAKE_CASE_ : Any = voice_preset return encoded_text
355
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
318
0
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline _snake_case = { "n_samples": 64, "horizon": 32, "num_inference_steps": 20, "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network "scale_grad_by_std": True, "scale": 0.1, "eta": 0.0, "t_grad_cutoff": 2, "device": "cpu", } if __name__ == "__main__": _snake_case = "hopper-medium-v2" _snake_case = gym.make(env_name) _snake_case = ValueGuidedRLPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", env=env, ) env.seed(0) _snake_case = env.reset() _snake_case = 0 _snake_case = 0 _snake_case = 1000 _snake_case = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy _snake_case = pipeline(obs, planning_horizon=32) # execute action in environment _snake_case, _snake_case, _snake_case, _snake_case = env.step(denorm_actions) _snake_case = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) _snake_case = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
36
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[Any] = _distribute_shards(**_lowerCamelCase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Optional[int] = _split_gen_kwargs(_lowerCamelCase , _lowerCamelCase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if expected is RuntimeError: with pytest.raises(_lowerCamelCase ): _number_of_shards_in_gen_kwargs(_lowerCamelCase ) else: _lowerCAmelCase : Optional[int] = _number_of_shards_in_gen_kwargs(_lowerCamelCase ) assert out == expected
36
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging snake_case__ = logging.get_logger(__name__) snake_case__ = { 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class UpperCamelCase_ (UpperCamelCase__ ): """simple docstring""" _lowerCAmelCase = """perceiver""" def __init__( self : int , _lowerCamelCase : Optional[Any]=256 , _lowerCamelCase : List[str]=1280 , _lowerCamelCase : List[Any]=768 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Dict=26 , _lowerCamelCase : Optional[Any]=8 , _lowerCamelCase : str=8 , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]="kv" , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : str=0.1 , _lowerCamelCase : List[str]=0.02 , _lowerCamelCase : str=1E-12 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Optional[int]=262 , _lowerCamelCase : Optional[Any]=2048 , _lowerCamelCase : List[Any]=56 , _lowerCamelCase : Any=[368, 496] , _lowerCamelCase : Tuple=16 , _lowerCamelCase : Optional[int]=1920 , _lowerCamelCase : Dict=16 , _lowerCamelCase : Union[str, Any]=[1, 16, 224, 224] , **_lowerCamelCase : Union[str, Any] , ): """simple docstring""" super().__init__(**__a ) A_ : List[str] = num_latents A_ : Dict = d_latents A_ : List[str] = d_model A_ : int = num_blocks A_ : Optional[int] = num_self_attends_per_block A_ : Union[str, Any] = num_self_attention_heads A_ : str = num_cross_attention_heads A_ : str = qk_channels A_ : List[Any] = v_channels A_ : str = cross_attention_shape_for_attention A_ : str = self_attention_widening_factor A_ : List[str] = cross_attention_widening_factor A_ : List[Any] = hidden_act A_ : Any = attention_probs_dropout_prob A_ : Optional[int] = initializer_range A_ : Dict = layer_norm_eps A_ : List[str] = use_query_residual # masked language modeling attributes A_ : Dict = vocab_size A_ : int = max_position_embeddings # image classification attributes A_ : Any = image_size # flow attributes A_ : List[Any] = train_size # multimodal autoencoding attributes A_ : Union[str, Any] = num_frames A_ : Optional[Any] = audio_samples_per_frame A_ : List[Any] = samples_per_patch A_ : Optional[Any] = output_shape class UpperCamelCase_ (UpperCamelCase__ ): """simple docstring""" @property def _a ( self : str ): """simple docstring""" if self.task == "multiple-choice": A_ : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A_ : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def _a ( self : Dict ): """simple docstring""" return 1E-4 def _a ( self : int , _lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 40 , _lowerCamelCase : int = 40 , ): """simple 
docstring""" if isinstance(__a , __a ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A_ : Optional[Any] = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A_ : Union[str, Any] = preprocessor.num_special_tokens_to_add(__a ) A_ : Optional[int] = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a ) # Generate dummy inputs according to compute batch and sequence A_ : Optional[int] = [''' '''.join(['''a'''] ) * seq_length] * batch_size A_ : Any = dict(preprocessor(__a , return_tensors=__a ) ) A_ : Tuple = inputs.pop('''input_ids''' ) return inputs elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A_ : str = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch ) A_ : int = self._generate_dummy_images(__a , __a , __a , __a ) A_ : Union[str, Any] = dict(preprocessor(images=__a , return_tensors=__a ) ) A_ : int = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
367
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class a__ ( UpperCAmelCase__ ): def __init__( self : List[Any] , a : List[Any] , a : Dict ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM __lowerCamelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self : List[Any] , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : float = 0.0 , a : int = 50 , a : Optional[bool] = None , a : Optional[str] = "pil" , a : bool = True , ): """simple docstring""" if isinstance(self.unet.config.sample_size , a ): __lowerCamelCase = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: __lowerCamelCase = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(a , a ) and len(a ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(a )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) __lowerCamelCase = randn_tensor(a , generator=a , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(a ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __lowerCamelCase = self.unet(a , a ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __lowerCamelCase = self.scheduler.step( a , a , a , eta=a , use_clipped_model_output=a , generator=a ).prev_sample __lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(a ) if not return_dict: return (image,) return ImagePipelineOutput(images=a )
67
'''simple docstring''' import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCAmelCase =logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None: __lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""{len(UpperCamelCase__ )} != {len(UpperCamelCase__ )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCAmelCase ={ # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 1_2: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 1_1], 4: [0, 4, 8, 1_1], 6: [0, 2, 4, 7, 9, 1_1], 9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1], 1_2: list(range(1_2)), }, 1_6: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 1_5], 3: [0, 8, 1_5], 4: [0, 5, 1_0, 1_5], 6: [0, 3, 6, 9, 1_2, 1_5], 8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5], 9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5], 1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5], 1_6: list(range(1_6)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCAmelCase ={ # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]}, 1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]}, } def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: try: __lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" f""" {n_student}""" ) return list(range(UpperCamelCase__ ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]: if n_student > n_teacher: raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(UpperCamelCase__ ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]: __lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(UpperCamelCase__ , UpperCamelCase__ ): AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval() else: assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}""" __lowerCamelCase = teacher.config.to_diff_dict() try: __lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: __lowerCamelCase = teacher_e if d is None: __lowerCamelCase = teacher_d 
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config , '''num_encoder_layers''' ): __lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: __lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: __lowerCamelCase = teacher_e if d is None: __lowerCamelCase = teacher_d if hasattr(teacher.config , '''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(UpperCamelCase__ ) # Copy weights __lowerCamelCase = teacher.config_class(**UpperCamelCase__ ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ ) # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student. __lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key. if copy_first_teacher_layers: # Our copying is done. We just log and save __lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" f""" {save_path}""" ) student.save_pretrained(UpperCamelCase__ ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: __lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ ) if d_layers_to_copy is None: __lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ ) try: if hasattr( UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ ) copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" ) __lowerCamelCase = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(UpperCamelCase__ ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
67
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowercase__ : Optional[Any] = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __lowercase ( _a ): if isinstance(_a , torch.Tensor ): return image elif isinstance(_a , PIL.Image.Image ): snake_case_ : Optional[Any] = [image] snake_case_ : Optional[Any] = [trans(img.convert('''RGB''' ) ) for img in image] snake_case_ : Tuple = torch.stack(_a ) return image class _UpperCAmelCase ( lowerCAmelCase__): def __init__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : int ): super().__init__() # make sure scheduler can always be converted to DDIM snake_case_ : List[Any] = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def _snake_case ( self : int , lowercase_ : List[Any] ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" ) def _snake_case ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Any ): # get the original timestep using init_timestep snake_case_ : Any = min(int(num_inference_steps * strength ) , lowercase_ ) snake_case_ : List[Any] = max(num_inference_steps - init_timestep , 0 ) snake_case_ : Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _snake_case ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Dict=None ): if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}" ) snake_case_ : Tuple = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) snake_case_ : Optional[Any] = init_latents.shape snake_case_ : int = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print('''add noise to latents at timestep''' , lowercase_ ) snake_case_ : Any = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) snake_case_ : Optional[int] = init_latents return latents @torch.no_grad() def __call__( self : List[Any] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowercase_ : float = 0.8 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : float = 0.0 , lowercase_ : int = 50 , lowercase_ : Optional[bool] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ): self.check_inputs(lowercase_ ) # 2. Preprocess image snake_case_ : List[Any] = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) snake_case_, snake_case_ : Union[str, Any] = self.get_timesteps(lowercase_ , lowercase_ , self.device ) snake_case_ : List[str] = timesteps[:1].repeat(lowercase_ ) # 4. 
Prepare latent variables snake_case_ : str = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) snake_case_ : Optional[Any] = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output snake_case_ : int = self.unet(lowercase_ , lowercase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 snake_case_ : int = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample snake_case_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) snake_case_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
155
"""simple docstring""" import os def __lowercase ( _a ): snake_case_ : Tuple = len(grid[0] ) snake_case_ : Optional[int] = len(_a ) snake_case_ : Union[str, Any] = 0 snake_case_ : Union[str, Any] = 0 snake_case_ : List[Any] = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(_a ): for j in range(n_rows - 3 ): snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: snake_case_ : Dict = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: snake_case_ : List[Any] = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) snake_case_ : List[str] = max( _a , _a , _a , _a ) if max_product > largest: snake_case_ : str = max_product return largest def __lowercase ( ): snake_case_ : Tuple = [] with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file: for line in file: grid.append(line.strip('''\n''' ).split(''' ''' ) ) snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )] return largest_product(_a ) if __name__ == "__main__": print(solution())
155
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase_ = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple: for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowercase__ : Tuple = '''lm_head''' lowercase__ : Tuple = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: lowercase__ : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: lowercase__ : Any = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[int] = value elif weight_type == "weight_g": lowercase__ : Tuple = value elif weight_type == "weight_v": lowercase__ : Any = value elif weight_type == "bias": lowercase__ : int = value else: lowercase__ : Tuple = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : Tuple = [] lowercase__ : int = fairseq_model.state_dict() lowercase__ : str = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) lowercase__ : int = True else: for key, mapped_key in MAPPING.items(): lowercase__ : Union[str, Any] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase__ : List[str] = True if "*" in mapped_key: lowercase__ : Tuple = name.split(__lowerCamelCase )[0].split('''.''' )[-2] lowercase__ : Union[str, Any] = mapped_key.replace('''*''' , __lowerCamelCase ) if "weight_g" in name: lowercase__ : int = '''weight_g''' elif "weight_v" in name: lowercase__ : Tuple = '''weight_v''' elif "bias" in name: lowercase__ : Tuple = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : List[str] = '''weight''' else: lowercase__ : Dict = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: lowercase__ : Dict = full_name.split('''conv_layers.''' )[-1] lowercase__ : int = name.split('''.''' ) lowercase__ : str = int(items[0] ) lowercase__ : int = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowercase__ : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True ) -> List[Any]: if config_path is not None: lowercase__ : Union[str, Any] = UniSpeechConfig.from_pretrained(__lowerCamelCase ) else: lowercase__ : Optional[int] = UniSpeechConfig() if is_finetuned: if dict_path: lowercase__ : Union[str, Any] = Dictionary.load_from_json(__lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase__ : Optional[int] = target_dict.pad_index lowercase__ : Optional[Any] = target_dict.bos_index lowercase__ : Optional[int] = target_dict.eos_index lowercase__ : Tuple = len(target_dict.symbols ) lowercase__ : Optional[int] = os.path.join(__lowerCamelCase , '''vocab.json''' ) if not os.path.isdir(__lowerCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCamelCase ) ) return os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) lowercase__ : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched lowercase__ : Any = 42 lowercase__ : Union[str, Any] = 43 with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__lowerCamelCase , __lowerCamelCase ) lowercase__ : Tuple = WavaVecaPhonemeCTCTokenizer( __lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCamelCase , ) lowercase__ : str = True if config.feat_extract_norm == '''layer''' else False lowercase__ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) lowercase__ : Tuple = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) lowercase__ : List[str] = UniSpeechForCTC(__lowerCamelCase ) else: lowercase__ : List[Any] = UniSpeechForPreTraining(__lowerCamelCase ) if is_finetuned: lowercase__ , lowercase__ , lowercase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: lowercase__ , lowercase__ , lowercase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) hf_unispeech.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') 
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase_ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
16
"""simple docstring""" import random def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool: '''simple docstring''' lowercase_ = num - 1 lowercase_ = 0 while s % 2 == 0: lowercase_ = s // 2 t += 1 for _ in range(5 ): lowercase_ = random.randrange(2 , num - 1 ) lowercase_ = pow(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if v != 1: lowercase_ = 0 while v != (num - 1): if i == t - 1: return False else: lowercase_ = i + 1 lowercase_ = (v**2) % num return True def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool: '''simple docstring''' if num < 2: return False lowercase_ = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 1_01, 1_03, 1_07, 1_09, 1_13, 1_27, 1_31, 1_37, 1_39, 1_49, 1_51, 1_57, 1_63, 1_67, 1_73, 1_79, 1_81, 1_91, 1_93, 1_97, 1_99, 2_11, 2_23, 2_27, 2_29, 2_33, 2_39, 2_41, 2_51, 2_57, 2_63, 2_69, 2_71, 2_77, 2_81, 2_83, 2_93, 3_07, 3_11, 3_13, 3_17, 3_31, 3_37, 3_47, 3_49, 3_53, 3_59, 3_67, 3_73, 3_79, 3_83, 3_89, 3_97, 4_01, 4_09, 4_19, 4_21, 4_31, 4_33, 4_39, 4_43, 4_49, 4_57, 4_61, 4_63, 4_67, 4_79, 4_87, 4_91, 4_99, 5_03, 5_09, 5_21, 5_23, 5_41, 5_47, 5_57, 5_63, 5_69, 5_71, 5_77, 5_87, 5_93, 5_99, 6_01, 6_07, 6_13, 6_17, 6_19, 6_31, 6_41, 6_43, 6_47, 6_53, 6_59, 6_61, 6_73, 6_77, 6_83, 6_91, 7_01, 7_09, 7_19, 7_27, 7_33, 7_39, 7_43, 7_51, 7_57, 7_61, 7_69, 7_73, 7_87, 7_97, 8_09, 8_11, 8_21, 8_23, 8_27, 8_29, 8_39, 8_53, 8_57, 8_59, 8_63, 8_77, 8_81, 8_83, 8_87, 9_07, 9_11, 9_19, 9_29, 9_37, 9_41, 9_47, 9_53, 9_67, 9_71, 9_77, 9_83, 9_91, 9_97, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10_24 ) -> int: '''simple docstring''' while True: lowercase_ = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(__lowerCAmelCase ): return num if __name__ == "__main__": UpperCAmelCase : Tuple = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num)))
136
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ = """""" for word_or_phrase in separated: if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise Exception("""join() accepts only strings to be joined""" ) joined += word_or_phrase + separator return joined.strip(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": from doctest import testmod testmod()
367
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def _UpperCAmelCase ( self : str): """simple docstring""" lowercase_ = Vector([1, 2, 3]) self.assertEqual(x.component(0) , 1) self.assertEqual(x.component(2) , 3) lowercase_ = Vector() def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""") def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = Vector([1, 2, 3, 4]) self.assertEqual(len(lowerCAmelCase_) , 4) def _UpperCAmelCase ( self : Any): """simple docstring""" lowercase_ = Vector([1, 2]) lowercase_ = Vector([1, 2, 3, 4, 5]) lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3) self.assertEqual(z.euclidean_length() , 0) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = Vector([1, 2, 3]) lowercase_ = Vector([1, 1, 1]) self.assertEqual((x + y).component(0) , 2) self.assertEqual((x + y).component(1) , 3) self.assertEqual((x + y).component(2) , 4) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = Vector([1, 2, 3]) lowercase_ = Vector([1, 1, 1]) self.assertEqual((x - y).component(0) , 0) self.assertEqual((x - y).component(1) , 1) self.assertEqual((x - y).component(2) , 2) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = Vector([1, 2, 3]) lowercase_ = Vector([2, -1, 4]) # for test of dot product lowercase_ = Vector([1, -2, -1]) self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""") self.assertEqual((a * b) , 0) def _UpperCAmelCase ( self : int): """simple docstring""" self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0) def _UpperCAmelCase ( self : Dict): """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""") def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = Vector([1, 2, 3]) lowercase_ = Vector([1, 0, 1]) self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""") def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = Vector([1, 0, 0, 0, 0, 0]) lowercase_ = x.copy() self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_)) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = Vector([1, 0, 0]) x.change_component(0 , 0) x.change_component(1 , 1) self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""") def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_)) def _UpperCAmelCase ( self : str): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_)) def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_)) 
def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(-5 , a.determinant()) def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3) lowercase_ = Vector([1, 2, 3]) self.assertEqual("""(14,32,50)""" , str(a * x)) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2)) def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) a.change_component(0 , 2 , 5) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_)) def _UpperCAmelCase ( self : str): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(7 , a.component(2 , 1) , 0.01) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b)) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b)) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , ) if __name__ == "__main__": unittest.main()
313
0
'''simple docstring''' from collections.abc import Iterable from typing import Any class A__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int | None = None ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = value _UpperCAmelCase : Tuple = None # Added in order to delete a node easier _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Optional[Any] = None def __repr__( self : Union[str, Any] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class A__ : """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Node | None = None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = root def __str__( self : List[str] ) -> str: """simple docstring""" return str(self.root ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Node , lowerCAmelCase__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids _UpperCAmelCase : List[Any] = node.parent if node.parent is not None: # reset its parent if self.is_right(UpperCamelCase__ ): # If it is the right children _UpperCAmelCase : Union[str, Any] = new_children else: _UpperCAmelCase : Dict = new_children else: _UpperCAmelCase : List[Any] = new_children def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def _lowerCAmelCase ( self : int ) -> bool: """simple docstring""" return self.root is None def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Dict ) -> None: """simple docstring""" _UpperCAmelCase : List[Any] = Node(UpperCamelCase__ ) # create a new Node if self.empty(): # if Tree is empty _UpperCAmelCase : int = new_node # set its root else: # Tree is not empty _UpperCAmelCase : Dict = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: _UpperCAmelCase : Optional[Any] = new_node # We insert the new node in a leaf break else: _UpperCAmelCase : Union[str, Any] = parent_node.left else: if parent_node.right is None: _UpperCAmelCase : Optional[Any] = new_node break else: _UpperCAmelCase : Any = parent_node.right _UpperCAmelCase : Optional[int] = parent_node def _lowerCAmelCase ( self : int , *lowerCAmelCase__ : Optional[Any] ) -> None: """simple docstring""" for value in values: self.__insert(UpperCamelCase__ ) def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError("Warning: Tree is empty! please use another." 
) else: _UpperCAmelCase : Union[str, Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: _UpperCAmelCase : List[Any] = node.left if value < node.value else node.right return node def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None _UpperCAmelCase : Optional[Any] = self.root if not self.empty(): while node.right is not None: _UpperCAmelCase : Optional[int] = node.right return node def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: _UpperCAmelCase : Optional[int] = self.root if self.root is None: return None if not self.empty(): _UpperCAmelCase : int = self.root while node.left is not None: _UpperCAmelCase : Tuple = node.left return node def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int ) -> None: """simple docstring""" _UpperCAmelCase : List[str] = self.search(UpperCamelCase__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(UpperCamelCase__ , UpperCamelCase__ ) elif node.left is None: # Has only right children self.__reassign_nodes(UpperCamelCase__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(UpperCamelCase__ , node.left ) else: _UpperCAmelCase : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore _UpperCAmelCase : Union[str, Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Dict=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : list , lowerCAmelCase__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(UpperCamelCase__ , node.left ) arr.append(node.value ) self.inorder(UpperCamelCase__ , node.right ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Node ) -> int: """simple docstring""" _UpperCAmelCase : str = [] self.inorder(UpperCamelCase__ , UpperCamelCase__ ) # append all values to list using inorder traversal return arr[k - 1] def __UpperCAmelCase ( a_: str ): _UpperCAmelCase : List[str] = [] if curr_node is not None: _UpperCAmelCase : Optional[int] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def __UpperCAmelCase ( ): _UpperCAmelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7) _UpperCAmelCase : str = BinarySearchTree() for i in testlist: t.insert(A_ ) # Prints all the elements of the list in order traversal print(A_ ) if t.search(6 ) is not None: print("The value 6 exists" ) else: print("The value 6 doesn't exist" ) if t.search(-1 ) is not None: print("The value -1 exists" ) else: print("The value -1 doesn't exist" ) if not t.empty(): print("Max Value: ", t.get_max().value ) # type: ignore print("Min Value: ", t.get_min().value ) # type: ignore for i in testlist: 
t.remove(A_ ) print(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
145
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = num_heads __magic_name__ = window_size __magic_name__ = mlp_ratio __magic_name__ = qkv_bias __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = drop_path_rate __magic_name__ = hidden_act __magic_name__ = use_absolute_embeddings __magic_name__ = patch_norm __magic_name__ = layer_norm_eps __magic_name__ = initializer_range __magic_name__ = is_training __magic_name__ = scope __magic_name__ = use_labels __magic_name__ = type_sequence_label_size __magic_name__ = encoder_stride __magic_name__ = out_features __magic_name__ = out_indices def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = self.get_config() return config, pixel_values, labels def _lowercase ( self : Tuple ) -> str: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range 
, encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCamelCase__ ): __magic_name__ = ["""stem"""] __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False def _lowercase ( self : Any ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def _lowercase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def _lowercase ( self : str ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" return def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) @unittest.skip("""Swin does not use 
inputs_embeds""" ) def _lowercase ( self : Any ) -> int: """simple docstring""" pass @unittest.skip("""Swin does not support feedforward chunking""" ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" pass def _lowercase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def _lowercase ( self : List[str] ) -> Dict: """simple docstring""" pass def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any: """simple docstring""" __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # Swin has a different seq_length __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , 
collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ): __magic_name__ = 0 return t def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ): with torch.no_grad(): __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. 
Dict has''' F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.''' ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _A ): '''simple docstring''' a__ = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ = MaskFormerSwinConfig def _lowercase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: __magic_name__ = backbone_class(UpperCamelCase__ ) backbone.to(UpperCamelCase__ ) backbone.eval() __magic_name__ = backbone(**UpperCamelCase__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ ) self.assertIsNotNone(outputs.attentions )
88
0
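# A minimal, self-contained sketch of the tuple/dict equivalence pattern that
# the MaskFormerSwin test above exercises: run the model once with
# return_dict=True and once with return_dict=False, zero out NaNs, and compare
# element-wise with torch.allclose. ToyModel is hypothetical and stands in for
# MaskFormerSwinModel.
import torch
from torch import nn


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

    def forward(self, pixel_values, return_dict=True):
        hidden = self.linear(pixel_values)
        if return_dict:
            return {"last_hidden_state": hidden}
        return (hidden,)


def set_nan_tensor_to_zero(t):
    t[t != t] = 0  # NaN != NaN, so this mask zeroes NaNs in place
    return t


model = ToyModel().eval()
inputs = torch.randn(2, 4)
with torch.no_grad():
    tuple_output = model(inputs, return_dict=False)
    dict_output = tuple(model(inputs, return_dict=True).values())
for t, d in zip(tuple_output, dict_output):
    assert torch.allclose(set_nan_tensor_to_zero(t), set_nan_tensor_to_zero(d), atol=1e-5)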
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : int = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) lowerCAmelCase : Dict = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowerCAmelCase : Tuple = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowerCAmelCase : Union[str, Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), 
("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) lowerCAmelCase : Optional[int] = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) lowerCAmelCase : Any = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) lowerCAmelCase : int = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) lowerCAmelCase : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) lowerCAmelCase : List[Any] = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) lowerCAmelCase : Optional[int] = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) lowerCAmelCase : Any = OrderedDict( [ # 
Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) lowerCAmelCase : str = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) lowerCAmelCase : str = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) lowerCAmelCase : Any = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) lowerCAmelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCAmelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCAmelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCAmelCase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCAmelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCAmelCase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCAmelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCAmelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCAmelCase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCAmelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Union[str, Any] = FLAX_MODEL_MAPPING lowerCAmelCase : Tuple = auto_class_update(FlaxAutoModel) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCAmelCase : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCAmelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCAmelCase : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[int] = 
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCAmelCase : Union[str, Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCAmelCase : List[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCAmelCase : Optional[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCAmelCase : List[str] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCAmelCase : str = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : List[str] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class __lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCAmelCase : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCAmelCase : Optional[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
127
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[int] = { """configuration_nllb_moe""": [ """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NllbMoeConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""", """NllbMoeForConditionalGeneration""", """NllbMoeModel""", """NllbMoePreTrainedModel""", """NllbMoeTop2Router""", """NllbMoeSparseMLP""", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTop2Router, ) else: import sys lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
127
1
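# A hedged usage sketch for the Flax auto-class mappings above: the lazy
# mapping resolves a checkpoint's config type to the matching Flax
# architecture. Assumes transformers is installed with the flax extras;
# "bert-base-cased" is only an illustrative checkpoint.
from transformers import FlaxAutoModel, FlaxAutoModelForMaskedLM

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
mlm_model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-cased")  # resolves to FlaxBertForMaskedLM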
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowercase__ ='bert-base-cased' lowercase__ ='google/pegasus-xsum' lowercase__ =[' Sam ate lunch today.', 'Sams lunch ingredients.'] lowercase__ =['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowercase__ ='patrickvonplaten/t5-tiny-random' lowercase__ ='sshleifer/bart-tiny-random' lowercase__ ='sshleifer/tiny-mbart' lowercase__ ='sshleifer/tiny-marian-en-de' def __UpperCamelCase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : list ): __a : Any = '''\n'''.join(lowerCAmelCase__ ) Path(lowerCAmelCase__ ).open('''w''' ).writelines(lowerCAmelCase__ ) def __UpperCamelCase ( lowerCAmelCase__ : Any ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.source" ) , lowerCAmelCase__ ) _dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.target" ) , lowerCAmelCase__ ) return tmp_dir class UpperCamelCase__ ( __lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCAmelCase (self : str , snake_case_ : str ): __a : int = AutoTokenizer.from_pretrained(snake_case_ ) __a : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __a : Dict = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES ) __a : List[Any] = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES ) __a : int = 4 __a : Optional[Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __a , __a : List[Any] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. __a : str = SeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , ) __a : str = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(snake_case_ , snake_case_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __a : Any = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCAmelCase (self : Union[str, Any] , snake_case_ : Tuple ): __a : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) __a : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES ) __a : Optional[int] = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES ) __a : Tuple = 4 __a : Dict = LegacySeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=2_0 , max_target_length=snake_case_ , ) __a : List[str] = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCAmelCase (self : Any ): __a : List[str] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) __a : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) __a : int = tmp_dir.joinpath('''train.source''' ).open().readlines() __a : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(snake_case_ , snake_case_ , 1_2_8 , snake_case_ ) __a : Tuple = {x.name for x in tmp_dir.iterdir()} __a : Union[str, Any] = {x.name for x in save_dir.iterdir()} __a : str = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(snake_case_ ) < len(snake_case_ ) assert len(snake_case_ ) == 1 assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def lowerCAmelCase (self : int ): if not FAIRSEQ_AVAILABLE: return __a , __a , __a : Dict = self._get_dataset(max_len=6_4 ) __a : int = 6_4 __a : int = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ ) __a : List[Any] = [len(snake_case_ ) for x in batch_sampler] assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples __a : int = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 ) __a : Optional[Any] = [] __a : int = [] for batch in data_loader: __a : Tuple = batch['''input_ids'''].shape __a : Any = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple __a : Any = 
np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(snake_case_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(snake_case_ ) assert num_src_per_batch[0] == max(snake_case_ ) if failures: raise AssertionError(f"too many tokens in {len(snake_case_ )} batches" ) def lowerCAmelCase (self : Dict ): __a , __a , __a : int = self._get_dataset(max_len=5_1_2 ) __a : str = 2 __a : Optional[Any] = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ ) __a : str = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 ) __a : List[Any] = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ ) __a : str = tokenizer.pad_token_id def count_pad_tokens(snake_case_ : str , snake_case_ : Tuple="input_ids" ): return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) < sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) ) assert len(snake_case_ ) == len(snake_case_ ) def lowerCAmelCase (self : Any , snake_case_ : Dict=1_0_0_0 , snake_case_ : str=1_2_8 ): if os.getenv('''USE_REAL_DATA''' , snake_case_ ): __a : Any = '''examples/seq2seq/wmt_en_ro''' __a : Union[str, Any] = max_len * 2 * 6_4 if not Path(snake_case_ ).joinpath('''train.len''' ).exists(): save_len_file(snake_case_ , snake_case_ ) else: __a : str = '''examples/seq2seq/test_data/wmt_en_ro''' __a : Optional[int] = max_len * 4 save_len_file(snake_case_ , snake_case_ ) __a : List[Any] = AutoTokenizer.from_pretrained(snake_case_ ) __a : Optional[Any] = SeqaSeqDataset( snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , ) return ds, max_tokens, tokenizer def lowerCAmelCase (self : Dict ): __a , __a , __a : Union[str, Any] = self._get_dataset() __a : Tuple = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) ) __a : Optional[int] = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) ) assert idsa.intersection(snake_case_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCAmelCase (self : int , snake_case_ : Dict ): __a : int = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ ) if tok_name == MBART_TINY: __a : Optional[Any] = SeqaSeqDataset( snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) __a : Union[str, Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __a : Optional[int] = SeqaSeqDataset( snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) __a : Tuple = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
216
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer lowercase__ =logging.get_logger(__name__) lowercase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase__ ={ 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } lowercase__ ={ 'junnyu/roformer_chinese_small': 1536, 'junnyu/roformer_chinese_base': 1536, 'junnyu/roformer_chinese_char_small': 512, 'junnyu/roformer_chinese_char_base': 512, 'junnyu/roformer_small_discriminator': 128, 'junnyu/roformer_small_generator': 128, } lowercase__ ={ 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class UpperCamelCase__ ( __lowercase ): _SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Optional[int] = RoFormerTokenizer def __init__(self : List[str] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Any="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : List[Any]="[MASK]" , snake_case_ : Any=True , snake_case_ : List[str]=None , **snake_case_ : Optional[int] , ): super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , ) __a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , snake_case_ ) != do_lower_case or pre_tok_state.get('''strip_accents''' , snake_case_ ) != strip_accents ): __a : List[str] = getattr(snake_case_ , pre_tok_state.pop('''type''' ) ) __a : Optional[Any] = do_lower_case __a : Optional[int] = strip_accents __a : List[str] = pre_tok_class(**snake_case_ ) __a : Optional[Any] = do_lower_case def __getstate__(self : Union[str, Any] ): __a : Any = self.__dict__.copy() __a : Union[str, Any] = BertPreTokenizer() return state def __setstate__(self : Tuple , snake_case_ : Optional[Any] ): __a 
: Dict = d __a : str = self.__dict__['''_tokenizer'''].get_vocab() __a : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) ) def lowerCAmelCase (self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ): __a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase (self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): __a : int = [self.sep_token_id] __a : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ): __a : Optional[Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ): __a : List[str] = BertPreTokenizer() return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
216
1
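# A minimal sketch of the pickling workaround used by the RoFormer fast
# tokenizer above: a non-picklable custom component is swapped out in
# __getstate__ and rebuilt in __setstate__. JiebaLike is a hypothetical
# stand-in for the wrapped JiebaPreTokenizer.
import pickle


class JiebaLike:
    """Stands in for a Rust-backed object that pickle cannot serialize."""

    def __reduce__(self):
        raise TypeError("JiebaLike is deliberately not picklable")


class Holder:
    def __init__(self):
        self.pre_tokenizer = JiebaLike()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["pre_tokenizer"] = None  # drop the problematic piece before pickling
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.pre_tokenizer = JiebaLike()  # rebuild it after unpickling


restored = pickle.loads(pickle.dumps(Holder()))
assert isinstance(restored.pre_tokenizer, JiebaLike)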
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = BarthezTokenizer __UpperCamelCase = BarthezTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple: super().setUp() UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowercase_ ) UpperCAmelCase = tokenizer def UpperCAmelCase__ ( self :Optional[int] ) -> Union[str, Any]: UpperCAmelCase = '<pad>' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :str ) -> Union[str, Any]: UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(lowercase_ ) , 10_11_22 ) def UpperCAmelCase__ ( self :Any ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def UpperCAmelCase__ ( self :Optional[int] ) -> Dict: UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] UpperCAmelCase = [0, 57, 30_18, 7_03_07, 91, 2] UpperCAmelCase = self.tokenizer( lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Dict ) -> str: if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
UpperCAmelCase = tokenizer.tokenize(lowercase_ ) UpperCAmelCase = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(lowercase_ ) UpperCAmelCase = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) @slow def UpperCAmelCase__ ( self :Union[str, Any] ) -> str: # fmt: off UpperCAmelCase = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowercase_ , )
181
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case_ = { """configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""RemBertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""RemBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """RemBertForCausalLM""", """RemBertForMaskedLM""", """RemBertForMultipleChoice""", """RemBertForQuestionAnswering""", """RemBertForSequenceClassification""", """RemBertForTokenClassification""", """RemBertLayer""", """RemBertModel""", """RemBertPreTrainedModel""", """load_tf_weights_in_rembert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRemBertForCausalLM""", """TFRemBertForMaskedLM""", """TFRemBertForMultipleChoice""", """TFRemBertForQuestionAnswering""", """TFRemBertForSequenceClassification""", """TFRemBertForTokenClassification""", """TFRemBertLayer""", """TFRemBertModel""", """TFRemBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
181
1
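# A simplified sketch of the _LazyModule pattern used by the NLLB-MoE and
# RemBert __init__ files above: submodule imports are deferred until an
# attribute is first accessed, via a PEP 562 module-level __getattr__. The
# real _LazyModule adds TYPE_CHECKING support and error handling on top of
# this core idea. This sketch belongs in a package __init__.py.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names


def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(module_name)  # imported only on first use
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")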
from __future__ import annotations import json import requests from bs4 import BeautifulSoup from fake_useragent import UserAgent snake_case_ = {'UserAgent': UserAgent().random} def lowerCamelCase__ ( snake_case_ : Any ) -> dict: __snake_case = script.contents[0] __snake_case = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__(self : Optional[Any] , a__ : Tuple ): """simple docstring""" __snake_case = f"""https://www.instagram.com/{username}/""" __snake_case = self.get_json() def a (self : Tuple ): """simple docstring""" __snake_case = requests.get(self.url , headers=a__ ).text __snake_case = BeautifulSoup(a__ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self : Optional[int] ): """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__(self : int ): """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def a (self : Optional[Any] ): """simple docstring""" return self.user_data["username"] @property def a (self : List[Any] ): """simple docstring""" return self.user_data["full_name"] @property def a (self : str ): """simple docstring""" return self.user_data["biography"] @property def a (self : Tuple ): """simple docstring""" return self.user_data["business_email"] @property def a (self : str ): """simple docstring""" return self.user_data["external_url"] @property def a (self : str ): """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def a (self : str ): """simple docstring""" return self.user_data["edge_follow"]["count"] @property def a (self : Dict ): """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def a (self : List[str] ): """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def a (self : Dict ): """simple docstring""" return self.user_data["is_verified"] @property def a (self : int ): """simple docstring""" return self.user_data["is_private"] def lowerCamelCase__ ( snake_case_ : str = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __snake_case = InstagramUser(snake_case_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , snake_case_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 12_0000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() snake_case_ = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
24
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[str] = CpmAntTokenizer A_ : Optional[int] = False def a (self : Optional[int] ): """simple docstring""" super().setUp() __snake_case = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def a (self : Dict ): """simple docstring""" __snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) __snake_case = '''今天天气真好!''' __snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = '''今天天气真好!''' __snake_case = [tokenizer.bos_token] + tokens __snake_case = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) __snake_case = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ )
24
1
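# A minimal sketch of the script-tag JSON extraction used by the Instagram
# example above, run against an inline HTML string rather than a live page;
# the payload shape here is illustrative only. Requires beautifulsoup4.
import json

from bs4 import BeautifulSoup

html = '<script>window._sharedData = {"config": 1, "user": "github"};</script>'
script = BeautifulSoup(html, "html.parser").find("script")
data = script.contents[0]
info = json.loads(data[data.find('{"config"') : -1])  # slice to -1 strips the trailing ";"
assert info["user"] == "github"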
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel snake_case_ : Union[str, Any] = HfApi() snake_case_ : Tuple = {} # fmt: off snake_case_ : List[Any] = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) snake_case_ : Union[str, Any] = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) snake_case_ : int = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) snake_case_ : Optional[int] = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) snake_case_ : int = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) snake_case_ : Optional[int] = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) snake_case_ : int = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) snake_case_ : int = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) snake_case_ : List[Any] = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) snake_case_ : Dict = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) snake_case_ : Union[str, Any] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, 
-2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, -2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) snake_case_ : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) snake_case_ : Tuple = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) snake_case_ : List[Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) snake_case_ : List[str] = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on snake_case_ : int = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": snake_case_ : Optional[Any] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"Started running {mod.modelId}!!!") if mod.modelId.startswith("CompVis"): snake_case_ : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: snake_case_ : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) snake_case_ : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) snake_case_ : Any = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): snake_case_ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"{mod.modelId} has passed successfully!!!")
7
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = 10 def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4] UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) UpperCAmelCase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(_snake_case , _snake_case) UpperCAmelCase_ = ['''It was the best of times.'''] self.assertEqual(_snake_case , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy()) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy()) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy()) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = 101 UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case) np.testing.assert_array_equal(_snake_case , _snake_case)
7
1
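The helpers exercised by these tests are not included in the dump; the following are plausible reference implementations inferred from the expected values in the assertions (the real utils_summarization module may differ in details).

import torch

def truncate_or_pad(sequence, block_size, pad_token_id):
    # Clip to block_size, or right-pad with pad_token_id up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the padding token appears.
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask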
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = ["""image_processor""", """tokenizer"""] __lowercase = """BlipImageProcessor""" __lowercase = """AutoTokenizer""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) # add QFormer tokenizer _snake_case = qformer_tokenizer def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): """simple docstring""" if images is None and text is None: raise ValueError('You have to specify at least images or text.' ) _snake_case = BatchFeature() if text is not None: _snake_case = self.tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) encoding.update(lowerCAmelCase_ ) _snake_case = self.qformer_tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) _snake_case = qformer_text_encoding.pop('input_ids' ) _snake_case = qformer_text_encoding.pop('attention_mask' ) if images is not None: _snake_case = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) encoding.update(lowerCAmelCase_ ) return encoding def lowerCamelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCamelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.tokenizer.model_input_names _snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple 
docstring""" if os.path.isfile(lowerCAmelCase_ ): raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _snake_case = os.path.join(lowerCAmelCase_ , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowerCAmelCase_ ) return super().save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def lowerCamelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" _snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , subfolder='qformer_tokenizer' ) _snake_case = cls._get_arguments_from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) args.append(lowerCAmelCase_ ) return cls(*lowerCAmelCase_ )
42
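A hypothetical end-to-end use of the processor defined above (InstructBLIP's processor, which pairs a BLIP image processor with a main tokenizer and a Q-Former tokenizer); the checkpoint name and image URL are assumptions.

import requests
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, text="What is in this picture?", return_tensors="pt")
# `inputs` now carries pixel_values, input_ids/attention_mask from the main
# tokenizer, and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.
print(sorted(inputs.keys()))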
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE : str = "bart" SCREAMING_SNAKE_CASE : Optional[int] = True @st.cache(allow_output_mutation=lowerCamelCase_ ) def UpperCamelCase_( ) -> int: if LOAD_DENSE_INDEX: _lowercase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) _lowercase : Union[str, Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) _lowercase : str = qar_model.eval() else: _lowercase , _lowercase : Any = (None, None) if MODEL_TYPE == "bart": _lowercase : Dict = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) _lowercase : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) _lowercase : Any = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) _lowercase : List[Any] = sas_model.eval() else: _lowercase , _lowercase : Union[str, Any] = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=lowerCamelCase_ ) def UpperCamelCase_( ) -> str: if LOAD_DENSE_INDEX: _lowercase : Optional[Any] = faiss.StandardGpuResources() _lowercase : Optional[int] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] _lowercase : Tuple = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) _lowercase : Any = faiss.IndexFlatIP(128 ) _lowercase : Union[str, Any] = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ ) wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU else: _lowercase , _lowercase : Any = (None, None) _lowercase : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=lowerCamelCase_ ) def UpperCamelCase_( ) -> Any: _lowercase : List[str] = datasets.load_dataset('eli5' , name='LFQA_reddit' ) _lowercase : Optional[Any] = elia['train_eli5'] _lowercase : Tuple = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) _lowercase : Union[str, Any] = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(lowerCamelCase_ ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = load_indexes() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = load_models() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = load_train_data() def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=10 ) -> List[str]: _lowercase : Any = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ ) _lowercase , _lowercase : List[str] = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ ) _lowercase : List[str] = [elia_train[int(lowerCamelCase_ )] for i in I[0]] return nn_examples def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict: if source == "none": _lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in 
range(11 )] ).strip(), []) else: if method == "dense": _lowercase , _lowercase : Dict = query_qa_dense_index( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: _lowercase , _lowercase : str = query_es_index( lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , ) _lowercase : List[Any] = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] _lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda lowerCamelCase_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None), } ) def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_=256 , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=0.95 , lowerCamelCase_=0.8 ) -> Dict: with torch.no_grad(): _lowercase : str = qa_sas_generate( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=1024 , device='cuda:0' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE : Union[str, Any] = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st) SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE : Any = 3 SCREAMING_SNAKE_CASE : Dict = True SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options") if retrieval_options: SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the 
[ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE : int = "wiki40b" SCREAMING_SNAKE_CASE : int = "dense" SCREAMING_SNAKE_CASE : str = "beam" SCREAMING_SNAKE_CASE : Optional[Any] = 2 SCREAMING_SNAKE_CASE : List[str] = 64 SCREAMING_SNAKE_CASE : Union[str, Any] = 256 SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE : int = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) SCREAMING_SNAKE_CASE : Any = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) SCREAMING_SNAKE_CASE : str = None # start main text SCREAMING_SNAKE_CASE : List[str] = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE : str = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE : Optional[int] = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE : Tuple = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE : List[Any] = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ") SCREAMING_SNAKE_CASE : List[Any] = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE : str = find_nearest_training(question) SCREAMING_SNAKE_CASE : Any = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE : str = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
21
0
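Stripped to its essence, the dense retrieval path in the demo above is max-inner-product search over precomputed passage embeddings; a self-contained sketch, with random stand-ins for the memmapped representations and for the question encoder.

import numpy as np
import faiss

dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")  # stand-in for the memmapped passage reps
index = faiss.IndexFlatIP(dim)                               # inner-product (unnormalized) index
index.add(passage_reps)

question_rep = np.random.rand(1, dim).astype("float32")      # stand-in for embed_questions_for_retrieval(...)
scores, ids = index.search(question_rep, 10)                 # top-10 passages by inner product
print(ids[0])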
"""simple docstring""" class __snake_case : """simple docstring""" def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : List[str] = None __A : str = None __A : Tuple = graph self._normalize_graph(__lowerCamelCase , __lowerCamelCase ) __A : Dict = len(__lowerCamelCase ) __A : Dict = None def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' if sources is int: __A : int = [sources] if sinks is int: __A : Any = [sinks] if len(__lowerCamelCase ) == 0 or len(__lowerCamelCase ) == 0: return __A : Dict = sources[0] __A : Optional[Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(__lowerCamelCase ) > 1 or len(__lowerCamelCase ) > 1: __A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) __A : Optional[int] = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: __A : str = max_input_flow __A : Any = 0 __A : int = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: __A : Tuple = max_input_flow __A : Any = size - 1 def UpperCamelCase__( self ): '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : str = algorithm(self ) class __snake_case : """simple docstring""" def __init__( self , __lowerCamelCase ): '''simple docstring''' __A : Optional[int] = flow_network __A : Union[str, Any] = flow_network.verticesCount __A : List[str] = flow_network.sourceIndex __A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that __A : int = flow_network.graph __A : Optional[Any] = False def UpperCamelCase__( self ): '''simple docstring''' if not self.executed: self._algorithm() __A : Dict = True def UpperCamelCase__( self ): '''simple docstring''' pass class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , __lowerCamelCase ): '''simple docstring''' super().__init__(__lowerCamelCase ) # use this to save your result __A : str = -1 def UpperCamelCase__( self ): '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , __lowerCamelCase ): '''simple docstring''' super().__init__(__lowerCamelCase ) __A : int = [[0] * self.verticies_count for i in range(self.verticies_count )] __A : str = [0] * self.verticies_count __A : Dict = [0] * self.verticies_count def UpperCamelCase__( self ): '''simple docstring''' __A : List[str] = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule __A : int = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list __A : Optional[Any] = 0 while i < len(__lowerCamelCase ): __A : Optional[int] = 
vertices_list[i] __A : int = self.heights[vertex_index] self.process_vertex(__lowerCamelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(__lowerCamelCase ) ) __A : Tuple = 0 else: i += 1 __A : Any = sum(self.preflow[self.source_index] ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(__lowerCamelCase , __lowerCamelCase ) self.relabel(__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : str = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : str = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): __A : str = self.heights[to_index] if min_height is not None: __A : List[Any] = min_height + 1 if __name__ == "__main__": a_ = [0] a_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network a_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate a_ = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
353
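As an independent cross-check of the sample network at the bottom of the push-relabel module above, a plain Edmonds-Karp (BFS augmenting paths) implementation should report the same maximum flow of 6: the only source-to-sink path is 0 -> 1 -> 2 -> 3, with bottleneck capacity 6 on edge 1 -> 2.

from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return max_flow
        # Find the bottleneck along the path, then augment.
        bottleneck, v = float("inf"), sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6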
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = """""" _lowerCamelCase = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ): '''simple docstring''' super().__init__(self , **__lowerCamelCase ) __A : int = repo_info __A : Optional[int] = token __A : int = None def UpperCamelCase__( self ): '''simple docstring''' if self.dir_cache is None: __A : int = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __A : Tuple = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(__lowerCamelCase ): {'''name''': str(__lowerCamelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = "rb" , **__lowerCamelCase , ): '''simple docstring''' if not isinstance(self.repo_info , __lowerCamelCase ): raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __A : Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCamelCase , revision=self.repo_info.sha ) return fsspec.open( __lowerCamelCase , mode=__lowerCamelCase , headers=get_authentication_headers_for_url(__lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def UpperCamelCase__( self , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' self._get_dirs() __A : Optional[Any] = self._strip_protocol(__lowerCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=False , **__lowerCamelCase ): '''simple docstring''' self._get_dirs() __A : Any = PurePosixPath(path.strip('''/''' ) ) __A : Any = {} for p, f in self.dir_cache.items(): __A : List[Any] = PurePosixPath(p.strip('''/''' ) ) __A : Dict = p.parent if root == path: __A : Union[str, Any] = f __A : List[str] = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
291
0
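At its core, the _open() method of the legacy filesystem above resolves a repo file to its Hub URL and streams it via fsspec; a minimal sketch of that resolution, with an illustrative repo and filename.

import fsspec
from huggingface_hub import hf_hub_url

url = hf_hub_url(repo_id="gpt2", filename="config.json")
with fsspec.open(url, mode="rb") as f:
    print(f.read(64))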
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _A ( unittest.TestCase ): def __a ( self : List[str] ) -> Any: """simple docstring""" lowercase : str = 10 def __a ( self : str ) -> List[Any]: """simple docstring""" lowercase : Optional[Any] = [1, 2, 3, 4] lowercase : List[str] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A ) def __a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowercase : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] lowercase : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A ) def __a ( self : Tuple ) -> Optional[int]: """simple docstring""" lowercase : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] lowercase : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A ) def __a ( self : Union[str, Any] ) -> int: """simple docstring""" lowercase : int = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase , lowercase : Optional[Any] = process_story(_A ) self.assertEqual(_A , [] ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : Union[str, Any] = '''''' lowercase , lowercase : int = process_story(_A ) self.assertEqual(_A , [] ) self.assertEqual(_A , [] ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Union[str, Any] = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase , lowercase : List[Any] = process_story(_A ) lowercase : List[str] = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(_A , _A ) lowercase : Union[str, Any] = ['''It was the best of times.'''] self.assertEqual(_A , _A ) def __a ( self : List[str] ) -> int: """simple docstring""" lowercase : str = torch.tensor([1, 2, 3, 4] ) lowercase : Any = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_A , 0 ).numpy() , expected.numpy() ) def __a ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase : List[str] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) lowercase : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_A , 23 ).numpy() , expected.numpy() ) def __a ( self : Union[str, Any] ) -> str: """simple docstring""" lowercase : Dict = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_A , 1 ).numpy() , expected.numpy() ) def __a ( self : Any ) -> Optional[int]: """simple docstring""" lowercase : Union[str, Any] = 101 lowercase : List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) lowercase : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase : str = compute_token_type_ids(_A , _A ) np.testing.assert_array_equal(_A , _A )
308
'''simple docstring'''
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
0
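A quick round-trip check of the hand-rolled codec above against the standard library; the two should agree on arbitrary byte strings, including the padding.

import base64

payload = b"Hello, base64!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload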
'''simple docstring'''


def merge_sort(collection: list) -> list:
    '''Sort a list by repeatedly pulling out the smallest and largest remaining items.'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
369
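A few sanity checks for the min-max sort above (illustration only):

assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]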
'''simple docstring'''
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    '''Compute the Adler-32 checksum of a string.'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
72
0
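For ASCII input the checksum above matches the standard library's implementation, since zlib.adler32 also starts from a=1, b=0 (zlib operates on bytes, so the string is encoded first; for non-ASCII text the two can diverge because ord() is not a UTF-8 byte value).

import zlib

text = "Algorithms"
assert adler32(text) == zlib.adler32(text.encode())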
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _snake_case : '''simple docstring''' A__ : Dict = BlenderbotConfig A__ : Optional[int] = {} A__ : Dict = '''gelu''' def __init__( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=13 ,lowerCamelCase_: Any=7 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Dict=False ,lowerCamelCase_: Any=99 ,lowerCamelCase_: List[Any]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: int=4 ,lowerCamelCase_: int=37 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: Dict=20 ,lowerCamelCase_: Any=2 ,lowerCamelCase_: Dict=1 ,lowerCamelCase_: str=0 ,) -> List[str]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Dict = eos_token_id UpperCAmelCase_ : List[str] = pad_token_id UpperCAmelCase_ : Tuple = bos_token_id def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase_ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase_ : str = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : int = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase_ : str = prepare_blenderbot_inputs_dict(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) return config, inputs_dict def A__ ( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ) -> Any: UpperCAmelCase_ : Union[str, Any] = TFBlenderbotModel(config=lowerCAmelCase__ ).get_decoder() UpperCAmelCase_ : Optional[Any] = inputs_dict["""input_ids"""] UpperCAmelCase_ : Union[str, Any] = input_ids[:1, :] UpperCAmelCase_ : List[Any] = inputs_dict["""attention_mask"""][:1, :] UpperCAmelCase_ : Dict = inputs_dict["""head_mask"""] UpperCAmelCase_ : Tuple = 1 # first forward pass UpperCAmelCase_ : Dict 
= model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,head_mask=lowerCAmelCase__ ,use_cache=lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase_ : Any = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase_ : Optional[int] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) UpperCAmelCase_ : str = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ )[0] UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,past_key_values=lowerCAmelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase_ : Union[str, Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=1e-3 ) def lowerCamelCase_ ( _a : Any , _a : str , _a : Optional[Any] , _a : Dict=None , _a : int=None , _a : Optional[Any]=None , _a : Tuple=None , _a : Any=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase_ : List[str] = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase_ : Tuple = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case ( __a , __a , unittest.TestCase ): '''simple docstring''' A__ : Optional[Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () A__ : int = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () A__ : List[str] = ( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) A__ : Tuple = True A__ : Optional[int] = False A__ : List[Any] = False def A__ ( self: Any ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = TFBlenderbotModelTester(self ) UpperCAmelCase_ : Optional[int] = ConfigTester(self ,config_class=lowerCAmelCase__ ) def A__ ( self: Dict ) -> List[Any]: self.config_tester.run_common_tests() def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ ) @require_tokenizers @require_tf class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : Dict = ['''My friends are cool but they eat too many carbs.'''] A__ : int = '''facebook/blenderbot-400M-distill''' @cached_property def A__ ( self: Union[str, Any] ) -> int: return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def A__ ( self: Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text ,return_tensors="""tf""" ) UpperCAmelCase_ : List[str] = self.model.generate( model_inputs.input_ids ,) UpperCAmelCase_ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=lowerCAmelCase__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
345
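The slow integration test above boils down to the following generation loop; the checkpoint name comes from the test class, the rest is a sketch.

from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])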
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __magic_name__ = logging.get_logger(__name__) @add_end_docstrings( __a , R''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def snake_case_ ( self , lowerCAmelCase__): if self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id).numpy() elif self.framework == "pt": __SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__) else: raise ValueError("""Unsupported framework""") return masked_index def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.get_masked_index(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = np.prod(masked_index.shape) if numel < 1: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def snake_case_ ( self , lowerCAmelCase__): if isinstance(lowerCAmelCase__ , lowerCAmelCase__): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0]) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__): if return_tensors is None: __SCREAMING_SNAKE_CASE = self.framework __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__) self.ensure_exactly_one_mask_token(lowerCAmelCase__) return model_inputs def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.model(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = model_inputs["""input_ids"""] return model_outputs def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=5 , lowerCAmelCase__=None): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __SCREAMING_SNAKE_CASE = target_ids.shape[0] __SCREAMING_SNAKE_CASE = model_outputs["""input_ids"""][0] __SCREAMING_SNAKE_CASE = model_outputs["""logits"""] if self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0] __SCREAMING_SNAKE_CASE = outputs.numpy() __SCREAMING_SNAKE_CASE = outputs[0, masked_index, :] __SCREAMING_SNAKE_CASE = stable_softmax(lowerCAmelCase__ , axis=-1) if target_ids is not None: __SCREAMING_SNAKE_CASE = tf.gather_nd(tf.squeeze(lowerCAmelCase__ , 0) , target_ids.reshape(-1 , 1)) __SCREAMING_SNAKE_CASE = tf.expand_dims(lowerCAmelCase__ , 0) __SCREAMING_SNAKE_CASE = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = topk.values.numpy(), topk.indices.numpy() else: __SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__).squeeze(-1) # Fill mask pipeline supports only one ${mask_token} per sample 
__SCREAMING_SNAKE_CASE = outputs[0, masked_index, :] __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1) if target_ids is not None: __SCREAMING_SNAKE_CASE = probs[..., target_ids] __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = probs.topk(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())): __SCREAMING_SNAKE_CASE = [] for v, p in zip(_values , _predictions): # Copy is important since we're going to modify this array in place __SCREAMING_SNAKE_CASE = input_ids.numpy().copy() if target_ids is not None: __SCREAMING_SNAKE_CASE = target_ids[p].tolist() __SCREAMING_SNAKE_CASE = p # Filter padding out: __SCREAMING_SNAKE_CASE = tokens[np.where(tokens != self.tokenizer.pad_token_id)] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p]), """sequence""": sequence} row.append(lowerCAmelCase__) result.append(lowerCAmelCase__) if single_mask: return result[0] return result def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None): if isinstance(lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = [targets] try: __SCREAMING_SNAKE_CASE = self.tokenizer.get_vocab() except Exception: __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = [] for target in targets: __SCREAMING_SNAKE_CASE = vocab.get(lowerCAmelCase__ , lowerCAmelCase__) if id_ is None: __SCREAMING_SNAKE_CASE = self.tokenizer( lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , max_length=1 , truncation=lowerCAmelCase__ , )["""input_ids"""] if len(lowerCAmelCase__) == 0: logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " """We cannot replace it with anything meaningful, ignoring it""") continue __SCREAMING_SNAKE_CASE = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. 
" f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.") target_ids.append(id_) __SCREAMING_SNAKE_CASE = list(set(lowerCAmelCase__)) if len(lowerCAmelCase__) == 0: raise ValueError("""At least one target must be provided when passed.""") __SCREAMING_SNAKE_CASE = np.array(lowerCAmelCase__) return target_ids def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None): __SCREAMING_SNAKE_CASE = {} if targets is not None: __SCREAMING_SNAKE_CASE = self.get_target_ids(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = target_ids if top_k is not None: __SCREAMING_SNAKE_CASE = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""") return {}, {}, postprocess_params def __call__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) == 1: return outputs[0] return outputs
100
0
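Driven through the high-level API, the pipeline implemented above is used like this; the checkpoint is an assumption.

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
for pred in fill_mask("The capital of France is <mask>.", top_k=3):
    # Each prediction carries score, token, token_str and the filled sequence.
    print(f"{pred['token_str']!r}: {pred['score']:.3f}")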
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right SCREAMING_SNAKE_CASE__ = 2_5_0_0_0_4 SCREAMING_SNAKE_CASE__ = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class a_ ( lowerCamelCase , unittest.TestCase ): lowercase = MBartaaTokenizer lowercase = MBartaaTokenizerFast lowercase = True lowercase = True def A__ ( self ) -> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = MBartaaTokenizer(_SCREAMING_SNAKE_CASE , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = """<s>""" UpperCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1054 ) def A__ ( self ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = MBartaaTokenizer(_SCREAMING_SNAKE_CASE , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , ) UpperCamelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) self.assertListEqual( _SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCamelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) self.assertListEqual( _SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", 
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """."""] , )

    @slow
    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        # fmt: off
        UpperCamelCase = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )

    def A__ ( self ) -> int:
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        UpperCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
                UpperCamelCase = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                UpperCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(_SCREAMING_SNAKE_CASE )

                # Save tokenizer rust, legacy_format=True
                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )

                # Checks it saves with the same files
                self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

                shutil.rmtree(_SCREAMING_SNAKE_CASE )

                # Save tokenizer rust, legacy_format=False
                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCamelCase = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

                shutil.rmtree(_SCREAMING_SNAKE_CASE )


@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    lowercase = """facebook/mbart-large-50-one-to-many-mmt"""
    lowercase = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    lowercase = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    lowercase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]

    @classmethod
    def A__ ( cls ) -> Union[str, Any]:
        """simple docstring"""
        UpperCamelCase = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        UpperCamelCase = 1
        return cls

    def A__ ( self ) -> str:
        """simple docstring"""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250038 )

    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> List[Any]:
        """simple docstring"""
        self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
        UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        UpperCamelCase = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> List[Any]:
        """simple docstring"""
        UpperCamelCase = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = 10
        UpperCamelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE ).input_ids[0]
        self.assertEqual(ids[0] , _SCREAMING_SNAKE_CASE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250053, 250001] )

    def A__ ( self ) -> Dict:
        """simple docstring"""
        UpperCamelCase = tempfile.mkdtemp()
        UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = MBartaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _SCREAMING_SNAKE_CASE )

    @require_torch
    def A__ ( self ) -> int:
        """simple docstring"""
        UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
        UpperCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def A__ ( self ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        UpperCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )

        self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def A__ ( self ) -> Tuple:
        """simple docstring"""
        UpperCamelCase = self.tokenizer(self.src_text , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
        UpperCamelCase = self.tokenizer(
            text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=10 , return_tensors="""pt""" )
        UpperCamelCase = targets["""input_ids"""]
        UpperCamelCase = shift_tokens_right(_SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        UpperCamelCase = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(_SCREAMING_SNAKE_CASE ) , {
                # en_XX, A, test, EOS
                """input_ids""": [[250004, 62, 3034, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250001,
            } , )
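# A hedged, self-contained sketch of the decoder-input shift the two @require_torch
# tests above rely on. It mirrors the semantics of the mBART-style
# `shift_tokens_right` helper (wrap each row's last non-pad token to position 0);
# it is an illustration written for this document, not the library implementation.
import torch


def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """Move each row's last non-pad token to the front and shift the rest right."""
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in every row
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens


# Labels shaped [RO_CODE, ..., eos] become decoder inputs [eos, RO_CODE, ...],
# matching the `batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]` assertion.
print(shift_tokens_right_sketch(torch.tensor([[250020, 884, 9019, 2]]), pad_token_id=1))
# tensor([[     2, 250020,    884,   9019]])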
183
'''simple docstring'''

import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class a_ ( unittest.TestCase ):
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Any:
        """simple docstring"""
        UpperCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = min_resolution
        UpperCamelCase = max_resolution
        UpperCamelCase = do_resize
        UpperCamelCase = size
        UpperCamelCase = do_normalize
        UpperCamelCase = image_mean
        UpperCamelCase = image_std
        UpperCamelCase = do_rescale
        UpperCamelCase = rescale_factor
        UpperCamelCase = do_pad

    def A__ ( self ) -> str:
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
        """simple docstring"""
        if not batched:
            UpperCamelCase = image_inputs[0]
            if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
                UpperCamelCase ,UpperCamelCase = image.size
            else:
                UpperCamelCase ,UpperCamelCase = image.shape[1], image.shape[2]
            if w < h:
                UpperCamelCase = int(self.size["""shortest_edge"""] * h / w )
                UpperCamelCase = self.size["""shortest_edge"""]
            elif w > h:
                UpperCamelCase = self.size["""shortest_edge"""]
                UpperCamelCase = int(self.size["""shortest_edge"""] * w / h )
            else:
                UpperCamelCase = self.size["""shortest_edge"""]
                UpperCamelCase = self.size["""shortest_edge"""]
        else:
            UpperCamelCase = []
            for image in image_inputs:
                UpperCamelCase ,UpperCamelCase = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
            UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class a_ ( lowerCamelCase , unittest.TestCase ):
    lowercase = ConditionalDetrImageProcessor if is_vision_available() else None

    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        UpperCamelCase = ConditionalDetrImageProcessingTester(self )

    @property
    def A__ ( self ) -> List[Any]:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self ) -> str:
        """simple docstring"""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )

    def A__ ( self ) -> Dict:
        """simple docstring"""
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
        self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )

        UpperCamelCase = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        pass

    def A__ ( self ) -> List[Any]:
        """simple docstring"""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )

        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A__ ( self ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )

        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A__ ( self ) -> List[str]:
        """simple docstring"""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )

        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
        UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def A__ ( self ) -> Any:
        """simple docstring"""
        UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            UpperCamelCase = json.loads(f.read() )

        UpperCamelCase = {"""image_id""": 39769, """annotations""": target}

        # encode them
        UpperCamelCase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
        UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )

        # verify pixel values
        UpperCamelCase = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )

        UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )

        # verify area
        UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
        # verify boxes
        UpperCamelCase = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
        # verify is_crowd
        UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
        # verify class_labels
        UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
        # verify orig_size
        UpperCamelCase = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
        # verify size
        UpperCamelCase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )

    @slow
    def A__ ( self ) -> List[str]:
        """simple docstring"""
        UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            UpperCamelCase = json.loads(f.read() )

        UpperCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}

        UpperCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )

        # encode them
        UpperCamelCase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
        UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )

        # verify pixel values
        UpperCamelCase = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )

        UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )

        # verify area
        UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
        # verify boxes
        UpperCamelCase = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
        # verify is_crowd
        UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
        # verify class_labels
        UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
        # verify masks
        UpperCamelCase = 822873
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _SCREAMING_SNAKE_CASE )
        # verify orig_size
        UpperCamelCase = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
        # verify size
        UpperCamelCase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
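# A minimal, hedged sketch of the shortest-edge resize rule that `get_expected_values`
# above reproduces: scale so the shorter side hits `shortest_edge` while preserving
# the aspect ratio. The real processor additionally caps the longer side at
# `longest_edge`; that branch is omitted here for brevity. Illustration only.
def expected_resized_size(h: int, w: int, shortest_edge: int = 18) -> tuple:
    # portrait: width is the shorter side
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    # landscape: height is the shorter side
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


print(expected_resized_size(400, 300))                    # (24, 18)
print(expected_resized_size(480, 640, shortest_edge=800))  # (800, 1066), as in the @slow tests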
183
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)


class lowercase ( lowercase_ ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = '''upernet'''

    def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ):
        super().__init__(**snake_case )

        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            snake_case_ = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(snake_case , snake_case ):
            snake_case_ = backbone_config.get('model_type' )
            snake_case_ = CONFIG_MAPPING[backbone_model_type]
            snake_case_ = config_class.from_dict(snake_case )

        snake_case_ = backbone_config
        snake_case_ = hidden_size
        snake_case_ = initializer_range
        snake_case_ = pool_scales
        snake_case_ = use_auxiliary_head
        snake_case_ = auxiliary_loss_weight
        snake_case_ = auxiliary_in_channels
        snake_case_ = auxiliary_channels
        snake_case_ = auxiliary_num_convs
        snake_case_ = auxiliary_concat_input
        snake_case_ = loss_ignore_index

    def a ( self ):
        snake_case_ = copy.deepcopy(self.__dict__ )
        snake_case_ = self.backbone_config.to_dict()
        snake_case_ = self.__class__.model_type
        return output
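# A hedged usage sketch for the config class above, written against the public
# transformers names this mangled snippet corresponds to (assumes `UperNetConfig`
# and `ResNetConfig` are available in the installed transformers; treat as an
# illustration, not an excerpt from the library):
from transformers import ResNetConfig, UperNetConfig

backbone_config = ResNetConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config, hidden_size=512)

# the overridden `to_dict` above serializes the nested backbone config as well
serialized = config.to_dict()
print(serialized["model_type"])                     # upernet
print(serialized["backbone_config"]["model_type"])  # resnet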
285
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class lowercase ( unittest.TestCase ):
    def a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def a ( self ):
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )

        snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting'
        snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )

        snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'

        snake_case_ = jax.random.PRNGKey(0 )
        snake_case_ = 50

        snake_case_ = jax.device_count()
        snake_case_ = num_samples * [prompt]
        snake_case_ = num_samples * [init_image]
        snake_case_ = num_samples * [mask_image]
        snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case )

        # shard inputs and rng
        snake_case_ = replicate(snake_case )
        snake_case_ = jax.random.split(snake_case , jax.device_count() )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )

        snake_case_ = pipeline(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )

        snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 )

        snake_case_ = images[0, 253:256, 253:256, -1]
        snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
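# A hedged, model-free sketch of the `replicate`/`shard` data-parallel pattern the
# test above uses: parameters are replicated to every device, per-example inputs
# gain a leading device axis, and a pmapped function consumes both. Names below
# are illustrative; only the flax/jax calls themselves are assumed.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
# one row per sample; the batch size must be divisible by the device count
batch = jnp.ones((num_devices * 2, 4))
sharded = shard(batch)                      # shape: (num_devices, 2, 4)
params = replicate({"w": jnp.ones((4,))})   # same params copied to each device


@jax.pmap
def apply(params, x):
    return x @ params["w"]


print(apply(params, sharded).shape)  # (num_devices, 2)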
285
1
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class lowerCamelCase_ :
    '''simple docstring'''

    a__ : List[Any] = BlenderbotSmallConfig
    a__ : int = {}
    a__ : str = """gelu"""

    def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=False , __lowercase=99 , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=20 , __lowercase=2 , __lowercase=1 , __lowercase=0 , ) -> Tuple:
        __UpperCamelCase :List[Any] = parent
        __UpperCamelCase :Tuple = batch_size
        __UpperCamelCase :Optional[int] = seq_length
        __UpperCamelCase :Optional[int] = is_training
        __UpperCamelCase :str = use_labels
        __UpperCamelCase :str = vocab_size
        __UpperCamelCase :List[Any] = hidden_size
        __UpperCamelCase :Any = num_hidden_layers
        __UpperCamelCase :List[str] = num_attention_heads
        __UpperCamelCase :int = intermediate_size
        __UpperCamelCase :Any = hidden_dropout_prob
        __UpperCamelCase :List[str] = attention_probs_dropout_prob
        __UpperCamelCase :Union[str, Any] = max_position_embeddings
        __UpperCamelCase :Union[str, Any] = eos_token_id
        __UpperCamelCase :Dict = pad_token_id
        __UpperCamelCase :List[str] = bos_token_id

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        __UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        __UpperCamelCase :Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        __UpperCamelCase :List[str] = tf.concat([input_ids, eos_tensor] , axis=1)
        __UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        __UpperCamelCase :List[Any] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        __UpperCamelCase :List[str] = prepare_blenderbot_small_inputs_dict(a_ , a_ , a_)
        return config, inputs_dict

    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[Any]:
        __UpperCamelCase :Any = TFBlenderbotSmallModel(config=a_).get_decoder()
        __UpperCamelCase :Tuple = inputs_dict['''input_ids''']

        __UpperCamelCase :Union[str, Any] = input_ids[:1, :]
        __UpperCamelCase :Any = inputs_dict['''attention_mask'''][:1, :]
        __UpperCamelCase :Optional[int] = inputs_dict['''head_mask''']
        __UpperCamelCase :List[Any] = 1

        # first forward pass
        __UpperCamelCase :Any = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_)
        __UpperCamelCase :Union[str, Any] = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        __UpperCamelCase :Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
        __UpperCamelCase :Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)

        # append to next input_ids and attention_mask
        __UpperCamelCase :Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1)
        __UpperCamelCase :Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1)

        __UpperCamelCase :Optional[int] = model(a_ , attention_mask=a_)[0]
        __UpperCamelCase :Union[str, Any] = model(a_ , attention_mask=a_ , past_key_values=a_)[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])

        # select random slice
        __UpperCamelCase :List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
        __UpperCamelCase :List[Any] = output_from_no_past[:, -3:, random_slice_idx]
        __UpperCamelCase :str = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(a_ , a_ , rtol=1E-3)


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ):
    '''simple docstring'''
    if attention_mask is None:
        __UpperCamelCase :Any = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        __UpperCamelCase :int = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        __UpperCamelCase :Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        __UpperCamelCase :Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        __UpperCamelCase :int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class lowerCamelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''

    a__ : Any = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    a__ : Optional[int] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    a__ : Any = (
        {
            """conversational""": TFBlenderbotSmallForConditionalGeneration,
            """feature-extraction""": TFBlenderbotSmallModel,
            """summarization""": TFBlenderbotSmallForConditionalGeneration,
            """text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
            """translation""": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    a__ : int = True
    a__ : Dict = False
    a__ : Dict = False

    def UpperCamelCase__ ( self) -> Optional[Any]:
        __UpperCamelCase :str = TFBlenderbotSmallModelTester(self)
        __UpperCamelCase :List[str] = ConfigTester(self , config_class=a_)

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self) -> Optional[int]:
        __UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*a_)


@require_tokenizers
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    a__ : Dict = [
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i\'m going to throw up.\nand why is that?"""
    ]
    a__ : Dict = """facebook/blenderbot_small-90M"""

    @cached_property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')

    @cached_property
    def UpperCamelCase__ ( self) -> str:
        __UpperCamelCase :Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Any = self.tokenizer(self.src_text , return_tensors='''tf''')
        __UpperCamelCase :List[str] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=a_ , )
        __UpperCamelCase :Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=a_)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
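# A hedged, standalone illustration of the padding-mask construction used by the
# `prepare_blenderbot_small_inputs_dict` helper above: positions equal to the pad
# id get mask 0, everything else gets 1. Values are made up for the demo.
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 6, 2, 0, 0], [7, 2, 0, 0, 0]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())
# [[1 1 1 0 0]
#  [1 1 0 0 0]]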
352
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


__lowercase = {
    '''configuration_layoutlmv3''': [
        '''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''LayoutLMv3Config''',
        '''LayoutLMv3OnnxConfig''',
    ],
    '''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
    '''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ['''LayoutLMv3TokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        '''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LayoutLMv3ForQuestionAnswering''',
        '''LayoutLMv3ForSequenceClassification''',
        '''LayoutLMv3ForTokenClassification''',
        '''LayoutLMv3Model''',
        '''LayoutLMv3PreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        '''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLayoutLMv3ForQuestionAnswering''',
        '''TFLayoutLMv3ForSequenceClassification''',
        '''TFLayoutLMv3ForTokenClassification''',
        '''TFLayoutLMv3Model''',
        '''TFLayoutLMv3PreTrainedModel''',
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ['''LayoutLMv3FeatureExtractor''']
    __lowercase = ['''LayoutLMv3ImageProcessor''']


if TYPE_CHECKING:
    from .configuration_layoutlmva import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMvaConfig,
        LayoutLMvaOnnxConfig,
    )
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmva import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
            TFLayoutLMvaModel,
            TFLayoutLMvaPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
        from .image_processing_layoutlmva import LayoutLMvaImageProcessor

else:
    import sys

    __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
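# A hedged, minimal sketch of the lazy-import pattern that `_LazyModule` above
# implements: the module advertises the names in `_import_structure`, but each
# defining module is imported only on first attribute access. Names here are
# illustrative, and the demo maps symbols to absolute stdlib modules (the real
# `_LazyModule` resolves relative submodules of the package instead).
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the module that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        value = getattr(importlib.import_module(self._symbol_to_module[symbol]), symbol)
        setattr(self, symbol, value)  # cache so later lookups bypass __getattr__
        return value


lazy = LazyModuleSketch("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(9.0))  # 3.0 -- `math` is imported only at this point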
105
0