column                    type     values
code                      string   length 86 – 54.5k
code_codestyle            int64    0 – 371
style_context             string   length 87 – 49.2k
style_context_codestyle   int64    0 – 349
label                     int64    0 – 1
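Each row below repeats these five columns in order: a `code` cell, its `code_codestyle` id, a `style_context` cell, its `style_context_codestyle` id, and the binary `label`. As a minimal sketch of how a dump with this schema is typically consumed — assuming it is a Hugging Face dataset with exactly these columns; `"user/code-style-pairs"` is a hypothetical placeholder, not the real dataset id:

```python
# Minimal sketch (hypothetical dataset id) of loading and inspecting one row.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:200])               # code sample, 86 to ~54.5k characters
print(row["code_codestyle"])           # style id in [0, 371]
print(row["style_context"][:200])      # context sample, 87 to ~49.2k characters
print(row["style_context_codestyle"])  # style id in [0, 349]
print(row["label"])                    # binary label, 0 or 1
```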
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case ="""▁""" __snake_case =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : int = BertGenerationTokenizer lowerCamelCase : Union[str, Any] = False lowerCamelCase : Dict = True def __UpperCAmelCase ( self : int ) -> int: super().setUp() lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : str ) -> List[Any]: lowerCAmelCase = '<s>' lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowerCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __UpperCAmelCase ( self : Tuple ) -> int: return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: lowerCAmelCase = 'Hello World!' lowerCAmelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def __UpperCAmelCase ( self : Dict ) -> int: lowerCAmelCase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowerCAmelCase = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCAmelCase = ' '.join(UpperCAmelCase__ ) lowerCAmelCase = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ ) lowerCAmelCase = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ ) lowerCAmelCase = BertGenerationConfig() lowerCAmelCase = BertGenerationEncoder(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def __UpperCAmelCase ( self : Dict ) -> List[str]: # fmt: off lowerCAmelCase = {'input_ids': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
4
def twos_complement(number: int) -> str:
    """
    Return the two's complement binary representation of a non-positive integer.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
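A quick worked check of the helper above (hypothetical usage, not part of the dataset row): for -5, `bin(-5)[3:]` is `"101"` (3 bits), `abs(-5) - (1 << 3)` is `-3` with magnitude bits `11`, and the sign-padded result is `1011`.

```python
# Hypothetical usage of the reconstructed helper above.
print(twos_complement(-5))   # 0b1011  : 4-bit two's complement of -5
print(twos_complement(-17))  # 0b101111: 6-bit two's complement of -17
```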
311
0
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
262
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
262
1
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
29
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
274
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
250
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Print the parenthesization, with Ai denoting the i-th matrix
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
250
1
"""simple docstring""" from __future__ import annotations def lowercase ( __snake_case : str , __snake_case : list[str] | None = None ): lowercase_ : Dict = word_bank or [] # create a table lowercase_ : int = len(__snake_case ) + 1 lowercase_ : list[list[list[str]]] = [] for _ in range(__snake_case ): table.append([] ) # seed value lowercase_ : str = [[]] # because empty string has empty combination # iterate through the indices for i in range(__snake_case ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(__snake_case )] == word: lowercase_ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(__snake_case )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(__snake_case )]: combination.reverse() return table[len(__snake_case )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
33
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # Dispatch to multiprocessing.Pool by default, or to joblib when a backend is configured.
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
361
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __A: """simple docstring""" @staticmethod def UpperCAmelCase_ (*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): pass def __magic_name__ ( __a : Image ): '''simple docstring''' UpperCamelCase__ = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def __magic_name__ ( __a : Image ): '''simple docstring''' UpperCamelCase__ = np.array(__a ) UpperCamelCase__ = npimg.shape return {"hash": hashimage(__a ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __A( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) SCREAMING_SNAKE_CASE__ = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def UpperCAmelCase_ (self ): pass @slow @require_torch def UpperCAmelCase_ (self ): UpperCamelCase__ = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) UpperCamelCase__ = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 ) # Shortening by hashing UpperCamelCase__ = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, 
"""scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = """facebook/sam-vit-huge""" UpperCamelCase__ = pipeline("""mask-generation""" , model=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 ) # Shortening by hashing UpperCamelCase__ = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053}, ] , )
178
0
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
82
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking if the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
215
0
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCamelCase : str = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple: '''simple docstring''' if os.path.exists(SCREAMING_SNAKE_CASE__ ): if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "config.json" ) ) and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , "config.json" ) ): os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , "config.json" ) ) if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin" ) ): os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin" ) ) else: os.makedirs(SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = 2 if unlogit: SCREAMING_SNAKE_CASE__ : int = torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = p * torch.log(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = 0 return -plogp.sum(dim=-1 ) def _a ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(f'''{x + 1}''' for x in range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) for row in range(len(SCREAMING_SNAKE_CASE__ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = model.config.num_hidden_layers, model.config.num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device ) SCREAMING_SNAKE_CASE__ : int = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device ) if head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device ) head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0 SCREAMING_SNAKE_CASE__ : Dict = 0.0 for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = tuple(t.to(args.device ) for t in inputs ) ((SCREAMING_SNAKE_CASE__) ,) : Union[str, Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) 
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : List[str] = entropy(attn.detach() , SCREAMING_SNAKE_CASE__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 SCREAMING_SNAKE_CASE__ : List[str] = torch.pow(torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0 if not args.dont_normalize_global_importance: SCREAMING_SNAKE_CASE__ : str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(SCREAMING_SNAKE_CASE__ ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(SCREAMING_SNAKE_CASE__ ) logger.info("Head ranked by importance scores" ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) SCREAMING_SNAKE_CASE__ : str = head_ranks.view_as(SCREAMING_SNAKE_CASE__ ) print_ad_tensor(SCREAMING_SNAKE_CASE__ ) return attn_entropy, head_importance, total_loss def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , SCREAMING_SNAKE_CASE__ , original_score * args.masking_threshold ) SCREAMING_SNAKE_CASE__ : List[str] = torch.ones_like(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) SCREAMING_SNAKE_CASE__ : Any = original_score while current_score >= original_score * args.masking_threshold: SCREAMING_SNAKE_CASE__ : str = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads SCREAMING_SNAKE_CASE__ : Optional[Any] = float("Inf" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(SCREAMING_SNAKE_CASE__ ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads SCREAMING_SNAKE_CASE__ : str = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) SCREAMING_SNAKE_CASE__ : str = new_head_mask.view(-1 ) SCREAMING_SNAKE_CASE__ : List[Any] = 0.0 SCREAMING_SNAKE_CASE__ : List[str] = new_head_mask.view_as(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_head_mask.clone().detach() 
print_ad_tensor(SCREAMING_SNAKE_CASE__ ) # Compute metric and head importance again SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = compute_heads_importance( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , SCREAMING_SNAKE_CASE__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info("Final head mask" ) print_ad_tensor(SCREAMING_SNAKE_CASE__ ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = datetime.now() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = compute_heads_importance( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = 1 / loss SCREAMING_SNAKE_CASE__ : Tuple = datetime.now() - before_time SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(p.numel() for p in model.parameters() ) SCREAMING_SNAKE_CASE__ : Tuple = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE__ ) ) } for k, v in heads_to_prune.items(): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Dict = [ v, ] assert sum(len(SCREAMING_SNAKE_CASE__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = sum(p.numel() for p in model.parameters() ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = datetime.now() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = compute_heads_importance( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , actually_pruned=SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : Tuple = 1 / loss SCREAMING_SNAKE_CASE__ : Dict = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , pruned_num_params / original_num_params * 1_00 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_00 ) save_model(SCREAMING_SNAKE_CASE__ , args.output_dir ) def _a ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." 
, ) parser.add_argument( "--model_name_or_path" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=SCREAMING_SNAKE_CASE__ , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=SCREAMING_SNAKE_CASE__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=SCREAMING_SNAKE_CASE__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=SCREAMING_SNAKE_CASE__ , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=SCREAMING_SNAKE_CASE__ , help="Batch size." ) parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE__ , default=42 ) parser.add_argument("--local_rank" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE__ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE__ , default="" , help="Can be used for distant debugging." 
) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: SCREAMING_SNAKE_CASE__ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) SCREAMING_SNAKE_CASE__ : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) SCREAMING_SNAKE_CASE__ : Dict = torch.device("cuda" , args.local_rank ) SCREAMING_SNAKE_CASE__ : Optional[int] = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) SCREAMING_SNAKE_CASE__ : str = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.parallel.DistributedDataParallel( SCREAMING_SNAKE_CASE__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE__ ) elif args.n_gpu > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.DataParallel(SCREAMING_SNAKE_CASE__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE__ ) # Prepare dataset SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) SCREAMING_SNAKE_CASE__ : str = (torch.from_numpy(SCREAMING_SNAKE_CASE__ ),) SCREAMING_SNAKE_CASE__ : str = TensorDataset(*SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: SCREAMING_SNAKE_CASE__ : Optional[int] = mask_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) prune_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
191
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
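A quick check of the conversion above (hypothetical usage, not part of the dataset row): `"11010"` is zero-padded to `"011010"`, grouped as `011|010`, giving `3` and `2`; indeed `0b11010` is 26, i.e. `0o32`.

```python
# Hypothetical usage of the reconstructed converter above.
print(bin_to_octal("11010"))  # 32
print(bin_to_octal("111"))    # 7
```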
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and regression targets
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # California housing dataset to train the model on
    data = fetch_california_housing()
    features, target = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SegformerConfig,
    SegformerForImageClassification,
    SegformerForSemanticSegmentation,
    SegformerImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ]
        )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ]
        )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ]
        )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ]
        )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ]
        )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ]
        )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ]
        )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ]
        )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ]
        )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ]
        )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ]
        )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ]
        )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: if "://" in dataset_path: A__ = dataset_path.split("://" )[1] return dataset_path def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: A__ = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) , fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ , lowercase_ , recursive=lowercase_ ) def _SCREAMING_SNAKE_CASE ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: A__ = None A__ = None A__ = threading.Lock()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class UpperCAmelCase_ ( A_ ): lowercase__ = '''lilt''' def __init__( self : List[str] , snake_case_ : Any=30_522 , snake_case_ : Optional[Any]=768 , snake_case_ : Union[str, Any]=12 , snake_case_ : Union[str, Any]=12 , snake_case_ : Any=3_072 , snake_case_ : List[str]="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Any=512 , snake_case_ : Optional[Any]=2 , snake_case_ : List[str]=0.02 , snake_case_ : Optional[Any]=1e-12 , snake_case_ : List[Any]=0 , snake_case_ : Any="absolute" , snake_case_ : str=None , snake_case_ : int=4 , snake_case_ : int=1_024 , **snake_case_ : Tuple , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case_ , **snake_case_ ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = classifier_dropout A__ = channel_shrink_ratio A__ = max_ad_position_embeddings
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : Any = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[int] = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] UpperCAmelCase_ : Union[str, Any] = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] UpperCAmelCase_ : Optional[Any] = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): UpperCAmelCase_ : Any = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, 
globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
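# Minimal usage sketch for the processor above; the checkpoint id is the public
# CLIP checkpoint, and the image path is an assumption for illustration:
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # inputs now holds input_ids, attention_mask and pixel_values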
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> str: A = {} def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : List[Any] ,A_ : int=1 ) -> Optional[Any]: if self.graph.get(A_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A = [[w, v]] if not self.graph.get(A_ ): A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[Any] ,A_ : Dict ) -> Any: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int=-2 ,A_ : Dict=-1 ) -> Any: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str]=-1 ) -> Optional[int]: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=-2 ) -> Tuple: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ) -> Dict: A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> List[str]: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str]=-2 ) -> Optional[Any]: A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return sorted_nodes def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: 
stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any]=-2 ,A_ : Union[str, Any]=-1 ) -> str: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str=-2 ) -> List[str]: A = time() self.bfs(A_ ) A = time() return end - begin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ) -> Dict: A = {} def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : int=1 ) -> str: # check if the u exists if self.graph.get(A_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A = [[w, v]] # add the other way if self.graph.get(A_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A = [[w, u]] def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : int ) -> int: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) # the other way round if self.graph.get(A_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any=-2 ,A_ : Optional[Any]=-1 ) -> Tuple: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Dict=-1 ) -> Tuple: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any]=-2 ) -> Union[str, Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if 
len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tuple ) -> Any: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple=-2 ,A_ : Any=-1 ) -> List[Any]: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any]=-2 ) -> str: A = time() self.bfs(A_ ) A = time() return end - begin
code_codestyle: 74
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''deit''' def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = encoder_stride class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1e-4
style_context_codestyle: 74
label: 1
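A minimal sketch of the traversal pattern both graph classes in the row above implement: iterative DFS with an explicit stack and BFS with a deque over a dict-of-lists adjacency structure. The plain `dfs`/`bfs` names and the toy graph below are illustrative stand-ins, not the snippet's obfuscated API.

from collections import deque

def dfs(graph: dict, start) -> list:
    # Depth-first: LIFO stack, mark a node when it is first popped.
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # reversed() keeps neighbours in left-to-right visit order
            stack.extend(reversed(graph.get(node, [])))
    return visited

def bfs(graph: dict, start) -> list:
    # Breadth-first: FIFO deque, mark a node when it is first enqueued.
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in graph.get(node, []):
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return visited

toy = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(dfs(toy, 0))  # [0, 1, 3, 2]
print(bfs(toy, 0))  # [0, 1, 2, 3]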
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ : List[str] = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys a_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 369
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 6
label: 0
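The Longformer `__init__` above defers its torch/TF imports through an `_import_structure` dict handed to `_LazyModule`; a rough stand-alone sketch of the same idea using the module-level `__getattr__` hook from PEP 562, with stdlib modules standing in for the real submodules:

import importlib

_import_structure = {"json": ["dumps", "loads"], "math": ["sqrt"]}

def __getattr__(name):
    # Import the owning module only when one of its symbols is first touched.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")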
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] lowerCAmelCase_ = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } lowerCAmelCase_ = {F'''funnel-transformer/{name}''': 512 for name in _model_names} lowerCAmelCase_ = {F'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names} class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : int = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase : Dict = FunnelTokenizer lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = 2 def __init__( self : Optional[int] ,_snake_case : Dict=None ,_snake_case : List[str]=None ,_snake_case : Optional[Any]=True ,_snake_case : 
Optional[int]="<unk>" ,_snake_case : Dict="<sep>" ,_snake_case : Any="<pad>" ,_snake_case : str="<cls>" ,_snake_case : Optional[Any]="<mask>" ,_snake_case : int="<s>" ,_snake_case : Dict="</s>" ,_snake_case : Optional[int]=True ,_snake_case : List[str]=True ,_snake_case : Dict=None ,_snake_case : str="##" ,**_snake_case : Optional[Any] ,) -> Any: """simple docstring""" super().__init__( _snake_case ,tokenizer_file=_snake_case ,do_lower_case=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,clean_text=_snake_case ,tokenize_chinese_chars=_snake_case ,strip_accents=_snake_case ,wordpieces_prefix=_snake_case ,**_snake_case ,) lowercase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' ,_snake_case ) != do_lower_case or normalizer_state.get('''strip_accents''' ,_snake_case ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' ,_snake_case ) != tokenize_chinese_chars ): lowercase__ : List[str] = getattr(_snake_case ,normalizer_state.pop('''type''' ) ) lowercase__ : List[str] = do_lower_case lowercase__ : Any = strip_accents lowercase__ : Union[str, Any] = tokenize_chinese_chars lowercase__ : Union[str, Any] = normalizer_class(**_snake_case ) lowercase__ : List[str] = do_lower_case def UpperCAmelCase ( self : int ,_snake_case : Tuple ,_snake_case : Tuple=None ) -> Optional[Any]: """simple docstring""" lowercase__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase ( self : Tuple ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : Optional[int] = [self.sep_token_id] lowercase__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase ( self : Tuple ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowercase__ : str = self._tokenizer.model.save(_snake_case ,name=_snake_case ) return tuple(_snake_case )
code_codestyle: 16
"""simple docstring""" _UpperCamelCase: Dict = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] _UpperCamelCase: Optional[int] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] _UpperCamelCase: int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] _UpperCamelCase: List[str] = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] _UpperCamelCase: Any = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] _UpperCamelCase: str = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] _UpperCamelCase: Optional[Any] = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] _UpperCamelCase: Optional[int] = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
style_context_codestyle: 255
label: 0
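Unlike BERT's all-zero first segment, the Funnel tokenizer above gives the `[CLS]` position its own segment id (the class sets `cls_token_type_id = 2`); a pure-Python restatement of its `create_token_type_ids_from_sequences` logic with placeholder token ids:

def funnel_token_type_ids(ids_a, ids_b=None, cls_type_id=2):
    # [CLS] gets segment id 2, sentence A gets 0, sentence B gets 1.
    cls, sep = [101], [102]  # placeholder ids for [CLS] / [SEP]
    if ids_b is None:
        return len(cls) * [cls_type_id] + len(ids_a + sep) * [0]
    return (
        len(cls) * [cls_type_id]
        + len(ids_a + sep) * [0]
        + len(ids_b + sep) * [1]
    )

print(funnel_token_type_ids([7, 8]))       # [2, 0, 0, 0]
print(funnel_token_type_ids([7, 8], [9]))  # [2, 0, 0, 0, 1, 1]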
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bs4 import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowercase_ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") lowercase_ = F'''https://www.google.com/search?q={query}&num=100''' lowercase_ = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: lowercase_ = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: lowercase_ = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
code_codestyle: 20
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def __lowerCAmelCase ( *__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Union[Dict, Any]] = None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=2 ): '''simple docstring''' from .. import __version__ __snake_case : List[Any] = take_from __snake_case : List[Any] = () if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ): __snake_case : str = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ): raise ValueError( F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' F''' version {__version__} is >= {version_name}''' ) __snake_case : Optional[Any] = None if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),) __snake_case : Optional[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),) __snake_case : Any = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: __snake_case : Tuple = F'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: __snake_case : Optional[Any] = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0: __snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1] __snake_case : int = call_frame.filename __snake_case : int = call_frame.lineno __snake_case : List[str] = call_frame.function __snake_case , __snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(__SCREAMING_SNAKE_CASE ) == 0: return elif len(__SCREAMING_SNAKE_CASE ) == 1: return values[0] return values
style_context_codestyle: 20
label: 1
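The `deprecate` helper above warns until a removal version is reached and raises once the installed version has passed it; a trimmed sketch of that version-gated pattern (the hard-coded `CURRENT_VERSION` stands in for the package's real `__version__`, and the multi-argument/kwarg-popping behaviour is omitted):

import warnings

from packaging import version

CURRENT_VERSION = "0.20.0"  # placeholder for the package's __version__

def deprecate(attribute: str, removed_in: str, message: str) -> None:
    # A deprecation whose removal version is already out is itself a bug.
    if version.parse(CURRENT_VERSION) >= version.parse(removed_in):
        raise ValueError(f"`{attribute}` should have been removed in version {removed_in}")
    warning = f"`{attribute}` is deprecated and will be removed in version {removed_in}. "
    warnings.warn(warning + message, FutureWarning, stacklevel=2)

deprecate("num_steps", "1.0.0", "Use `num_inference_steps` instead.")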
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py SCREAMING_SNAKE_CASE__ = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) SCREAMING_SNAKE_CASE__ = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]: UpperCamelCase = SavedModel() UpperCamelCase = [] with open(os.path.join(__UpperCamelCase , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: UpperCamelCase = json.load(__UpperCamelCase )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(__UpperCamelCase )] ) with open(__UpperCamelCase , """rb""" ) as f: saved_model.ParseFromString(f.read() ) UpperCamelCase = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want UpperCamelCase = sorted(__UpperCamelCase ) UpperCamelCase = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__UpperCamelCase ) if strict and len(__UpperCamelCase ) > 0: raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops ) elif len(__UpperCamelCase ) > 0: print(F"Found the following incompatible ops for the opset {opset}:" ) print(*__UpperCamelCase , sep="""\n""" ) else: print(F"The saved model {saved_model_path} can properly be converted with ONNX." ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
code_codestyle: 321
'''simple docstring''' import math def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> float: if initial_intensity < 0: raise ValueError("""The value of intensity cannot be negative""" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
style_context_codestyle: 321
label: 1
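The Malus-law helper above computes I = I0·cos²θ for an ideal polarizer; a few spot checks of that formula:

import math

def malus(initial_intensity: float, angle_deg: float) -> float:
    # Transmitted intensity through an ideal polarizer: I = I0 * cos^2(theta)
    return initial_intensity * math.cos(math.radians(angle_deg)) ** 2

print(malus(100.0, 0))   # 100.0 (aligned polarizer transmits everything)
print(malus(100.0, 60))  # ~25.0 (cos 60 deg = 0.5, squared = 0.25)
print(malus(100.0, 90))  # ~3.7e-31 (crossed polarizers: zero up to float error)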
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ : Tuple = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ : Tuple = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __lowerCAmelCase ( __a ): snake_case : Optional[Any] = VOCAB_FILES_NAMES snake_case : Dict = PRETRAINED_VOCAB_FILES_MAP snake_case : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case : str = ["""input_ids""", """attention_mask"""] snake_case : List[str] = RobertaTokenizer def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ): super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , 
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , ) _UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space: _UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , pre_tok_state.pop("""type""" ) ) _UpperCAmelCase : Any = add_prefix_space _UpperCAmelCase : List[Any] = pre_tok_class(**lowerCAmelCase__ ) _UpperCAmelCase : Dict = add_prefix_space _UpperCAmelCase : int = """post_processor""" _UpperCAmelCase : Any = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ ) if tokenizer_component_instance: _UpperCAmelCase : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase : Any = tuple(state["""sep"""] ) if "cls" in state: _UpperCAmelCase : Tuple = tuple(state["""cls"""] ) _UpperCAmelCase : Dict = False if state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space: _UpperCAmelCase : List[str] = add_prefix_space _UpperCAmelCase : Dict = True if state.get("""trim_offsets""" , lowerCAmelCase__ ) != trim_offsets: _UpperCAmelCase : Tuple = trim_offsets _UpperCAmelCase : List[str] = True if changes_to_apply: _UpperCAmelCase : Dict = getattr(lowerCAmelCase__ , state.pop("""type""" ) ) _UpperCAmelCase : Optional[Any] = component_class(**lowerCAmelCase__ ) setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ ) @property def snake_case_ (self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def snake_case_ (self , lowerCAmelCase__ ): _UpperCAmelCase : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else value _UpperCAmelCase : int = value def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ): _UpperCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ): _UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=None ): _UpperCAmelCase : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ): _UpperCAmelCase : str = [self.sep_token_id] _UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
code_codestyle: 170
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowerCAmelCase_ : List[Any] = None lowerCAmelCase_ : Any = logging.get_logger(__name__) lowerCAmelCase_ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ : List[str] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ : Tuple = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } lowerCAmelCase_ : str = '''▁''' class __lowerCAmelCase ( __a ): snake_case : List[str] = VOCAB_FILES_NAMES snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case : str = ["""input_ids""", """attention_mask"""] snake_case : List[Any] = BarthezTokenizer def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , **lowerCAmelCase__ , ): # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase : Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) _UpperCAmelCase : List[str] = vocab_file _UpperCAmelCase : Tuple = False if not self.vocab_file else True def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : int = [self.cls_token_id] _UpperCAmelCase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ): _UpperCAmelCase : str = [self.sep_token_id] _UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCAmelCase__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _UpperCAmelCase : Union[str, Any] = os.path.join( lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
style_context_codestyle: 170
label: 1
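Both fast tokenizers in this row assemble `<s> … </s>` specials; RoBERTa's pair layout from `build_inputs_with_special_tokens` above is `<s> A </s></s> B </s>`, restated here in plain Python (0 and 2 are RoBERTa's usual `<s>`/`</s>` ids):

def roberta_inputs(ids_a, ids_b=None, bos=0, eos=2):
    # Single sequence: <s> A </s>; pair: <s> A </s></s> B </s>
    output = [bos] + ids_a + [eos]
    if ids_b is None:
        return output
    return output + [eos] + ids_b + [eos]

print(roberta_inputs([10, 11]))        # [0, 10, 11, 2]
print(roberta_inputs([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]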
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase = { """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""}, """tokenizer_file""": { """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json""" }, } _UpperCAmelCase = {"""mobilebert-uncased""": 5_1_2} _UpperCAmelCase = {} class a ( UpperCAmelCase__ ): UpperCamelCase : Dict = VOCAB_FILES_NAMES UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Union[str, Any] = MobileBertTokenizer def __init__( self : List[Any] , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[int]="[UNK]" , lowerCAmelCase : List[Any]="[SEP]" , lowerCAmelCase : str="[PAD]" , lowerCAmelCase : int="[CLS]" , lowerCAmelCase : Tuple="[MASK]" , lowerCAmelCase : Any=True , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] , ) -> Dict: '''simple docstring''' super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , ) SCREAMING_SNAKE_CASE_: Tuple =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowerCAmelCase , normalizer_state.pop("""type""" ) ) SCREAMING_SNAKE_CASE_: List[Any] =do_lower_case SCREAMING_SNAKE_CASE_: Union[str, Any] =strip_accents SCREAMING_SNAKE_CASE_: List[Any] =tokenize_chinese_chars SCREAMING_SNAKE_CASE_: Optional[Any] =normalizer_class(**lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =do_lower_case def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any=None ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =[self.sep_token_id] SCREAMING_SNAKE_CASE_: List[str] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase ) return tuple(lowerCAmelCase )
code_codestyle: 173
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __magic_name__ ( *lowercase ): if not isinstance(lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Optional[Any] =list(lowercase ) for i in range(len(lowercase ) ): SCREAMING_SNAKE_CASE_: Optional[Any] =None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: List[Any] =[ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowercase , lowercase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __magic_name__ ( lowercase = None , lowercase = 128 ): if function is None: return functools.partial(lowercase , starting_batch_size=lowercase ) SCREAMING_SNAKE_CASE_: str =starting_batch_size def decorator(*lowercase , **lowercase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE_: Optional[int] =list(inspect.signature(lowercase ).parameters.keys() ) # Guard against user error if len(lowercase ) < (len(lowercase ) + 1): SCREAMING_SNAKE_CASE_: List[Any] =""", """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase , *lowercase , **lowercase ) except Exception as e: if should_reduce_batch_size(lowercase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
style_context_codestyle: 173
label: 1
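The `find_executable_batch_size` helper above reruns a training function with a halved batch size whenever an out-of-memory failure is recognised; a simplified sketch that only catches `MemoryError` (the real helper also matches CUDA/cuDNN/CPU-allocator messages and empties accelerator caches between attempts):

import functools

def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:  # allow @find_executable_batch_size(starting_batch_size=...)
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    @functools.wraps(function)
    def decorator(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                # batch_size is injected as the first positional argument
                return function(batch_size, *args, **kwargs)
            except MemoryError:
                batch_size //= 2  # halve and retry
    return decorator

@find_executable_batch_size(starting_batch_size=8)
def train(batch_size):
    if batch_size > 2:
        raise MemoryError("simulated OOM")
    return batch_size

print(train())  # 2, the first size that fits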
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self , a , a=3 , a=32 , a=3 , a=10 , a=[10, 20, 30, 40] , a=[1, 1, 2, 1] , a=True , a=True , a="relu" , a=3 , a=None , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embeddings_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = len(a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any: SCREAMING_SNAKE_CASE = TFResNetModel(config=a) SCREAMING_SNAKE_CASE = model(a) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int: SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = TFResNetForImageClassification(a) SCREAMING_SNAKE_CASE = model(a , labels=a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class _snake_case ( A__ , A__ , unittest.TestCase ): _lowercase : List[Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _lowercase : Dict = ( {'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification} if is_tf_available() else {} ) _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : List[str] = False _lowercase : str = False _lowercase : int = False def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = TFResNetModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , 
has_text_modality=a) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: return @unittest.skip(reason='ResNet does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ ( self) -> int: pass @unittest.skip(reason='ResNet does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(a) SCREAMING_SNAKE_CASE = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ['pixel_values'] self.assertListEqual(arg_names[:1] , a) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: def check_hidden_states_output(a , a , a): SCREAMING_SNAKE_CASE = model_class(a) SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a , a)) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(a) , expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = True check_hidden_states_output(a , a , a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(a , a , a) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a) @slow def SCREAMING_SNAKE_CASE__ ( self) -> str: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = TFResNetModel.from_pretrained(a) self.assertIsNotNone(a) def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_tf @require_vision class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() 
SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='tf') # forward pass SCREAMING_SNAKE_CASE = model(**a) # verify the logits SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape , a) SCREAMING_SNAKE_CASE = tf.constant([-11.10_69, -9.78_77, -8.37_77]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1E-4))
code_codestyle: 327
from math import isqrt def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [True] * max_number for i in range(2 , isqrt(max_number - 1) + 1): if is_prime[i]: for j in range(i**2 , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = False return [i for i in range(2 , _UpperCAmelCase) if is_prime[i]] def lowerCamelCase__ (_UpperCAmelCase = 10**8): SCREAMING_SNAKE_CASE = calculate_prime_numbers(max_number // 2) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f"""{solution() = }""")
style_context_codestyle: 327
label: 1
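The Project Euler solution above counts semiprimes p·q below 10⁸ with a two-pointer sweep over the primes up to half the bound; the same sweep on a toy bound of 30:

primes = [2, 3, 5, 7, 11, 13]  # primes below 30 // 2
left, right, count = 0, len(primes) - 1, 0
while left <= right:
    # retreat `right` until primes[left] * primes[right] fits under the bound
    while primes[left] * primes[right] >= 30:
        right -= 1
    count += right - left + 1  # every q in primes[left:right + 1] pairs with this p
    left += 1
print(count)  # 10 -> {4, 6, 9, 10, 14, 15, 21, 22, 25, 26}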
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :Tuple = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : List[str] = """sew-d""" def __init__( self : Optional[Any] , snake_case_ : Tuple=3_2 , snake_case_ : Optional[Any]=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Tuple=3_0_7_2 , snake_case_ : Tuple=2 , snake_case_ : int=5_1_2 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : str=("p2c", "c2p") , snake_case_ : Dict="layer_norm" , snake_case_ : str="gelu_python" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : str=1e-7 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Optional[Any]="group" , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : int=False , snake_case_ : Union[str, Any]=1_2_8 , snake_case_ : int=1_6 , snake_case_ : Any=True , snake_case_ : Tuple=0.0_5 , snake_case_ : Tuple=1_0 , snake_case_ : Dict=2 , snake_case_ : Tuple=0.0 , snake_case_ : List[Any]=1_0 , snake_case_ : Union[str, Any]=0 , snake_case_ : Any="mean" , snake_case_ : Optional[Any]=False , snake_case_ : Any=False , snake_case_ : Tuple=2_5_6 , snake_case_ : int=0 , snake_case_ : Optional[Any]=1 , snake_case_ : List[str]=2 , **snake_case_ : List[str] , ): super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = squeeze_factor _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = position_buckets _UpperCAmelCase = share_att_key _UpperCAmelCase = relative_attention _UpperCAmelCase = norm_rel_ebd _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = feature_layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # sequence classification _UpperCAmelCase = use_weighted_layer_sum _UpperCAmelCase = classifier_proj_size @property def lowercase ( self : Any ): return functools.reduce(operator.mul , self.conv_stride , 1 )
code_codestyle: 22
from pathlib import Path import fire def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int ): lowerCAmelCase_ : List[str] = Path(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = Path(__UpperCamelCase ) dest_dir.mkdir(exist_ok=__UpperCamelCase ) for path in src_dir.iterdir(): lowerCAmelCase_ : Optional[Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] lowerCAmelCase_ : List[str] = dest_dir.joinpath(path.name ) print(__UpperCamelCase ) dest_path.open('''w''' ).write('''\n'''.join(__UpperCamelCase ) ) if __name__ == "__main__": fire.Fire(minify)
103
0
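The SEW-D config above rejects mismatched `conv_dim`/`conv_stride`/`conv_kernel` lengths and exposes the product of the strides as the input-to-logits ratio. A minimal standalone sketch of that check (the function name `check_conv_config` is illustrative, not part of the record):

import functools
import operator


def check_conv_config(conv_dim: tuple, conv_stride: tuple, conv_kernel: tuple) -> int:
    """Validate that all three tuples describe the same number of conv layers,
    then return the overall downsampling factor (product of the strides)."""
    num_layers = len(conv_dim)
    if len(conv_stride) != num_layers or len(conv_kernel) != num_layers:
        raise ValueError(
            "Configuration for convolutional layers is incorrect: "
            f"got {num_layers} dims, {len(conv_stride)} strides, {len(conv_kernel)} kernels."
        )
    return functools.reduce(operator.mul, conv_stride, 1)


# With the defaults used above, the 13 layers downsample the waveform by 5 * 2**6 = 320.
assert check_conv_config(
    (64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
    (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
    (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
) == 320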
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = (DDPMParallelScheduler,) def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ ={ """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**_SCREAMING_SNAKE_CASE ) return config def _snake_case ( self )-> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Dict: for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Dict: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> int: self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , ) def _snake_case ( self )-> Optional[int]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> List[Any]: for t in [0, 500, 999]: self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5 def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter lowerCamelCase_ =self.dummy_sample_deter + 0.1 lowerCamelCase_ =self.dummy_sample_deter - 0.1 lowerCamelCase_ =samplea.shape[0] lowerCamelCase_ =torch.stack([samplea, samplea, samplea] , dim=0 ) lowerCamelCase_ =torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowerCamelCase_ =scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3 def _snake_case ( self )-> int: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ 
=scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter lowerCamelCase_ =torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3 def _snake_case ( self )-> str: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter lowerCamelCase_ =torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3 def _snake_case ( self )-> str: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =scheduler.timesteps for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ): if i == len(_SCREAMING_SNAKE_CASE ) - 1: lowerCamelCase_ =-1 else: lowerCamelCase_ =timesteps[i + 1] lowerCamelCase_ =scheduler.previous_timestep(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =prev_t.item() self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> int: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[100, 87, 50, 51, 0] with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[100, 87, 50, 1, 0] lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[int]: lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ 
=[scheduler.config.num_train_timesteps] with self.assertRaises( _SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
49
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` via DFS backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Create a state space tree and traverse each branch depth-first;
    a branch terminates when the current sequence reaches the input length."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
49
1
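For the backtracking permutation generator in the record above, a collecting variant makes the expected behaviour easy to verify against the standard library (the helper `permutations_of` is illustrative, not from the record):

from itertools import permutations


def permutations_of(sequence: list) -> list[list]:
    """Collect all permutations using the same DFS/backtracking scheme,
    returning them instead of printing."""
    results: list[list] = []

    def backtrack(current: list, used: list[bool]) -> None:
        if len(current) == len(sequence):
            results.append(current.copy())
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


# n! permutations, produced in the same order as itertools.permutations
out = permutations_of(["A", "B", "C"])
assert len(out) == 6
assert out == [list(p) for p in permutations(["A", "B", "C"])]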
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_0 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=None , ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : str = seq_length UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : str = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : str = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = max_position_embeddings UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : str = scope def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Optional[Any] = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __UpperCAmelCase ( self ) -> Dict: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self ) -> Tuple: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = BertGenerationEncoder(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() 
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase ) UpperCAmelCase_ : str = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Optional[Any] = BertGenerationEncoder(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Tuple = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , ) UpperCAmelCase_ : str = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Any: UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Optional[int] = BertGenerationDecoder(config=_UpperCamelCase ).to(_UpperCamelCase ).eval() # first forward pass UpperCAmelCase_ : List[str] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase , ) UpperCAmelCase_ : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ : List[str] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['hidden_states'][0] UpperCAmelCase_ : List[Any] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['hidden_states'][0] # select random slice UpperCAmelCase_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ) -> Union[str, Any]: UpperCAmelCase_ : int = BertGenerationDecoder(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase (_snake_case , _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : List[Any] = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : int = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Tuple = BertGenerationEncoderTester(self ) UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Tuple = 'bert' self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: # This regression test was failing with PyTorch < 1.3 ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase_ : str = None self.model_tester.create_and_check_model_as_decoder( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase ) @slow def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(_UpperCamelCase ) @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Any = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) UpperCAmelCase_ : Optional[int] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(_UpperCamelCase )[0] UpperCAmelCase_ : Dict = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , _UpperCamelCase ) UpperCAmelCase_ : Dict = torch.tensor( [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) ) @require_torch class lowerCamelCase 
(unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase_ : str = model(_UpperCamelCase )[0] UpperCAmelCase_ : str = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , _UpperCamelCase ) UpperCAmelCase_ : Dict = torch.tensor( [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
29
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
0
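The optimizer/scheduler tests in the record above compare learning rates against hard-coded expectations; the linear warmup-then-decay values (e.g. `[0.0, 5.0, 10.0, 8.75, ...]` for base LR 10, 2 warmup steps, 10 training steps) follow from a simple multiplier, sketched here without torch (the function name is illustrative):

def linear_schedule_lr(base_lr: float, step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    """LR multiplier: ramp linearly from 0 over the warmup steps,
    then decay linearly to 0 at num_training_steps."""
    if step < num_warmup_steps:
        factor = step / max(1, num_warmup_steps)
    else:
        factor = max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))
    return base_lr * factor


# Reproduces the expected values asserted by the test record above.
lrs = [round(linear_schedule_lr(10.0, s, 2, 10), 2) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]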
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ :List[str] = { '''facebook/mask2former-swin-small-coco-instance''': ( '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json''' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } A_ :int = logging.get_logger(__name__) class __A ( a ): """simple docstring""" UpperCamelCase__ : Union[str, Any] ="""mask2former""" UpperCamelCase__ : Tuple =["""swin"""] UpperCamelCase__ : Dict ={"""hidden_size""": """hidden_dim"""} def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ): """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __UpperCamelCase : Optional[int] =CONFIG_MAPPING['swin']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase : List[str] =backbone_config.pop('model_type' ) __UpperCamelCase : str =CONFIG_MAPPING[backbone_model_type] __UpperCamelCase : List[Any] =config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
' f'Supported model types: {",".join(self.backbones_supported )}' ) __UpperCamelCase : Dict =backbone_config __UpperCamelCase : Optional[int] =feature_size __UpperCamelCase : Union[str, Any] =mask_feature_size __UpperCamelCase : Tuple =hidden_dim __UpperCamelCase : Optional[int] =encoder_feedforward_dim __UpperCamelCase : Optional[int] =activation_function __UpperCamelCase : Dict =encoder_layers __UpperCamelCase : List[Any] =decoder_layers __UpperCamelCase : int =num_attention_heads __UpperCamelCase : Optional[Any] =dropout __UpperCamelCase : int =dim_feedforward __UpperCamelCase : Any =pre_norm __UpperCamelCase : Union[str, Any] =enforce_input_projection __UpperCamelCase : str =common_stride __UpperCamelCase : List[str] =ignore_value __UpperCamelCase : Optional[int] =num_queries __UpperCamelCase : Any =no_object_weight __UpperCamelCase : int =class_weight __UpperCamelCase : str =mask_weight __UpperCamelCase : Dict =dice_weight __UpperCamelCase : str =train_num_points __UpperCamelCase : str =oversample_ratio __UpperCamelCase : int =importance_sample_ratio __UpperCamelCase : List[str] =init_std __UpperCamelCase : Union[str, Any] =init_xavier_std __UpperCamelCase : Any =use_auxiliary_loss __UpperCamelCase : Tuple =feature_strides __UpperCamelCase : Dict =output_auxiliary_logits __UpperCamelCase : Union[str, Any] =decoder_layers super().__init__(**lowerCamelCase__ ) @classmethod def __lowercase ( cls , lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" return cls( backbone_config=lowerCamelCase__ , **lowerCamelCase__ , ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =copy.deepcopy(self.__dict__ ) __UpperCamelCase : List[Any] =self.backbone_config.to_dict() __UpperCamelCase : Union[str, Any] =self.__class__.model_type return output
357
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return f'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy' def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() def __lowercase ( self , lowerCamelCase__=0 , lowerCamelCase__=(4, 4, 64, 64) , lowerCamelCase__=False ): """simple docstring""" __UpperCamelCase : str =jnp.bfloataa if fpaa else jnp.floataa __UpperCamelCase : Optional[Any] =jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) , dtype=lowerCamelCase__ ) return image def __lowercase ( self , lowerCamelCase__=False , lowerCamelCase__="CompVis/stable-diffusion-v1-4" ): """simple docstring""" __UpperCamelCase : List[Any] =jnp.bfloataa if fpaa else jnp.floataa __UpperCamelCase : Optional[int] ='bf16' if fpaa else None __UpperCamelCase , __UpperCamelCase : Any =FlaxUNetaDConditionModel.from_pretrained( lowerCamelCase__ , subfolder='unet' , dtype=lowerCamelCase__ , revision=lowerCamelCase__ ) return model, params def __lowercase ( self , lowerCamelCase__=0 , lowerCamelCase__=(4, 77, 768) , lowerCamelCase__=False ): """simple docstring""" __UpperCamelCase : str =jnp.bfloataa if fpaa else jnp.floataa __UpperCamelCase : Optional[int] =jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) , dtype=lowerCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase , __UpperCamelCase : Dict =self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowerCamelCase__ ) __UpperCamelCase : Dict =self.get_latents(lowerCamelCase__ , fpaa=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =self.get_encoder_hidden_states(lowerCamelCase__ , fpaa=lowerCamelCase__ ) __UpperCamelCase : List[str] =model.apply( {'params': params} , lowerCamelCase__ , jnp.array(lowerCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase__ , ).sample assert sample.shape == latents.shape __UpperCamelCase : List[str] =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCamelCase : int =jnp.array(lowerCamelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, 
-0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase , __UpperCamelCase : Dict =self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =self.get_latents(lowerCamelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCamelCase__ ) __UpperCamelCase : int =self.get_encoder_hidden_states(lowerCamelCase__ , shape=(4, 77, 1024) , fpaa=lowerCamelCase__ ) __UpperCamelCase : str =model.apply( {'params': params} , lowerCamelCase__ , jnp.array(lowerCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase__ , ).sample assert sample.shape == latents.shape __UpperCamelCase : int =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCamelCase : Optional[Any] =jnp.array(lowerCamelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 )
245
0
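The Mask2Former config in the record above serializes its nested backbone config by deep-copying `__dict__` and replacing the sub-config with its own dict. A minimal sketch of that pattern, assuming nothing beyond the standard library (class names are illustrative):

import copy


class BackboneConfig:
    model_type = "swin"

    def __init__(self, depths=(2, 2, 18, 2)):
        self.depths = list(depths)

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["model_type"] = self.model_type
        return output


class CompositeConfig:
    model_type = "mask2former-like"  # illustrative, not a real model type

    def __init__(self, backbone_config: BackboneConfig, hidden_dim: int = 256):
        self.backbone_config = backbone_config
        self.hidden_dim = hidden_dim

    def to_dict(self) -> dict:
        # Serialize recursively: the nested config becomes a plain dict.
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output


cfg = CompositeConfig(BackboneConfig())
d = cfg.to_dict()
assert d["backbone_config"]["model_type"] == "swin"
assert d["hidden_dim"] == 256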
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py lowercase_ = "src/transformers" lowercase_ = "docs/source/en" lowercase_ = "." def lowerCAmelCase (__A , __A , __A): """simple docstring""" with open(__A , '''r''' , encoding='''utf-8''' , newline='''\n''') as f: _a = f.readlines() # Find the start prompt. _a = 0 while not lines[start_index].startswith(__A): start_index += 1 start_index += 1 _a = start_index while not lines[end_index].startswith(__A): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | lowercase_ = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. lowercase_ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") lowercase_ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. lowercase_ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. lowercase_ = direct_transformers_import(TRANSFORMERS_PATH) def lowerCAmelCase (__A): """simple docstring""" _a = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __A) return [m.group(0) for m in matches] def lowerCAmelCase (__A , __A): """simple docstring""" _a = 2 if text == '''✅''' or text == '''❌''' else len(__A) _a = (width - text_length) // 2 _a = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCAmelCase (): """simple docstring""" _a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _a = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } _a = {name: config.replace('''Config''' , '''''') for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. _a = collections.defaultdict(__A) _a = collections.defaultdict(__A) _a = collections.defaultdict(__A) _a = collections.defaultdict(__A) _a = collections.defaultdict(__A) # Let's lookup through all transformers object (once). for attr_name in dir(__A): _a = None if attr_name.endswith('''Tokenizer'''): _a = slow_tokenizers _a = attr_name[:-9] elif attr_name.endswith('''TokenizerFast'''): _a = fast_tokenizers _a = attr_name[:-13] elif _re_tf_models.match(__A) is not None: _a = tf_models _a = _re_tf_models.match(__A).groups()[0] elif _re_flax_models.match(__A) is not None: _a = flax_models _a = _re_flax_models.match(__A).groups()[0] elif _re_pt_models.match(__A) is not None: _a = pt_models _a = _re_pt_models.match(__A).groups()[0] if lookup_dict is not None: while len(__A) > 0: if attr_name in model_name_to_prefix.values(): _a = True break # Try again after removing the last word in the name _a = ''''''.join(camel_case_split(__A)[:-1]) # Let's build that table! 
_a = list(model_name_to_config.keys()) model_names.sort(key=str.lower) _a = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). _a = [len(__A) + 2 for c in columns] _a = max([len(__A) for name in model_names]) + 2 # Build the table per se _a = '''|''' + '''|'''.join([_center_text(__A , __A) for c, w in zip(__A , __A)]) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths]) + "|\n" _a = {True: '''✅''', False: '''❌'''} for name in model_names: _a = model_name_to_prefix[name] _a = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__A , __A) for l, w in zip(__A , __A)]) + "|\n" return table def lowerCAmelCase (__A=False): """simple docstring""" _a , _a , _a , _a = _find_text_in_file( filename=os.path.join(__A , '''index.md''') , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) _a = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__A , '''index.md''') , '''w''' , encoding='''utf-8''' , newline='''\n''') as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:]) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''') if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") lowercase_ = parser.parse_args() check_model_table(args.fix_and_overwrite)
211
"""Pure-Python MD5, following RFC 1321."""

from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as an 8-char little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit unsigned integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the little-endian hex MD5 digest of `message`."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_452_301
    b0 = 0xEF_CDA_B89
    c0 = 0x98_BAD_CFE
    d0 = 0x10_325_476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
211
1
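Any MD5 reimplementation like the one in the record above can be checked against the standard library; a quick sanity harness using the RFC 1321 test vectors (hashlib only, independent of the record's code):

import hashlib

# RFC 1321 test vectors; a correct MD5 implementation must reproduce these digests.
VECTORS = {
    b"": "d41d8cd98f00b204e9800998ecf8427e",
    b"a": "0cc175b9c0f1b6a831c399e269772661",
    b"abc": "900150983cd24fb0d6963f7d28e17f72",
}

for message, expected in VECTORS.items():
    assert hashlib.md5(message).hexdigest() == expected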
"""simple docstring""" def _A ( UpperCamelCase_ : str, UpperCamelCase_ : list[str]) -> str: '''simple docstring''' __lowercase = "" for word_or_phrase in separated: if not isinstance(UpperCamelCase_, UpperCamelCase_): raise Exception("join() accepts only strings to be joined") joined += word_or_phrase + separator return joined.strip(UpperCamelCase_) if __name__ == "__main__": from doctest import testmod testmod()
144
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _a = pytest.mark.integration _a = {'comet'} _a = importlib.util.find_spec('fairseq') is not None _a = {'code_eval'} _a = os.name == 'nt' _a = {'bertscore', 'frugalscore', 'perplexity'} _a = importlib.util.find_spec('transformers') is not None def _A ( UpperCamelCase_ : Dict) -> Any: '''simple docstring''' @wraps(UpperCamelCase_) def wrapper(self : Dict, UpperCamelCase_ : Dict): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"") else: test_case(self, UpperCamelCase_) return wrapper def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' @wraps(UpperCamelCase_) def wrapper(self : int, UpperCamelCase_ : str): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"") else: test_case(self, UpperCamelCase_) return wrapper def _A ( UpperCamelCase_ : Tuple) -> str: '''simple docstring''' @wraps(UpperCamelCase_) def wrapper(self : Optional[int], UpperCamelCase_ : Optional[Any]): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"") else: test_case(self, UpperCamelCase_) return wrapper def _A ( ) -> str: '''simple docstring''' __lowercase = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowercase ,lowercase ,lowercase ) @local class _lowerCAmelCase ( parameterized.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Tuple = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def _lowercase ( self : Dict, UpperCAmelCase__ : int ): __lowercase = "[...]" __lowercase = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", UpperCAmelCase__ ) ).module_path ) __lowercase = datasets.load.import_main_class(metric_module.__name__, dataset=UpperCAmelCase__ ) # check parameters __lowercase = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCAmelCase__, metric_module.__name__ ): with self.use_local_metrics(): try: __lowercase = doctest.testmod(UpperCAmelCase__, verbose=UpperCAmelCase__, raise_on_error=UpperCAmelCase__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0 ) self.assertGreater(results.attempted, 1 ) @slow def _lowercase ( self : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = "[...]" __lowercase = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", UpperCAmelCase__ ) ).module_path ) # run doctest with self.use_local_metrics(): __lowercase = doctest.testmod(UpperCAmelCase__, verbose=UpperCAmelCase__, raise_on_error=UpperCAmelCase__ ) self.assertEqual(results.failed, 0 ) 
self.assertGreater(results.attempted, 1 ) @contextmanager def _lowercase ( self : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase__ ): yield else: yield @contextmanager def _lowercase ( self : List[Any] ): def load_local_metric(UpperCAmelCase__ : Any, *UpperCAmelCase__ : List[Any], **UpperCAmelCase__ : Any ): return load_metric(os.path.join("metrics", UpperCAmelCase__ ), *UpperCAmelCase__, **UpperCAmelCase__ ) with patch("datasets.load_metric" ) as mock_load_metric: __lowercase = load_local_metric yield @classmethod def _lowercase ( cls : Optional[Any], UpperCAmelCase__ : List[Any] ): def wrapper(UpperCAmelCase__ : Tuple ): __lowercase = contextmanager(UpperCAmelCase__ ) __lowercase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt") def _A ( UpperCamelCase_ : Any) -> Optional[Any]: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags class _lowerCAmelCase ( lowercase ): """simple docstring""" def _lowercase ( self : Tuple, UpperCAmelCase__ : Tuple ): assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor") as mock_create_predictor: __lowercase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore") def _A ( UpperCamelCase_ : Tuple) -> int: '''simple docstring''' import torch def bert_cos_score_idf(UpperCamelCase_ : Tuple, UpperCamelCase_ : str, *UpperCamelCase_ : Optional[Any], **UpperCamelCase_ : Dict): return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_)) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model"), patch( "bert_score.scorer.bert_cos_score_idf") as mock_bert_cos_score_idf: __lowercase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet") def _A ( UpperCamelCase_ : Tuple) -> List[Any]: '''simple docstring''' def load_from_checkpoint(UpperCamelCase_ : Tuple): class _lowerCAmelCase : """simple docstring""" def _lowercase ( self : str, UpperCAmelCase__ : int, *UpperCAmelCase__ : Dict, **UpperCAmelCase__ : Dict ): assert len(UpperCAmelCase__ ) == 2 __lowercase = [0.19, 0.92] return scores, sum(UpperCAmelCase__ ) / len(UpperCAmelCase__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model") as mock_download_model: __lowercase = None with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint: __lowercase = load_from_checkpoint yield def _A ( ) -> Tuple: '''simple docstring''' __lowercase = load_metric(os.path.join("metrics", "seqeval")) __lowercase = "ERROR" __lowercase = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(UpperCamelCase_, match=re.escape(UpperCamelCase_)): metric.compute(predictions=[], references=[], scheme=UpperCamelCase_)
144
1
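The metric doctests in the record above patch expensive model calls (BLEURT, BERTScore, COMET) with cheap stubs before running. A minimal illustration of that patching pattern, assuming only the standard library (the function names are illustrative stand-ins):

from unittest.mock import patch


def load_big_model():
    """Stand-in for an expensive download / forward pass."""
    raise RuntimeError("too expensive for a unit test")


def score(texts: list[str]) -> list[float]:
    model = load_big_model()
    return [model.predict(t) for t in texts]


# Replace the expensive call with a deterministic stub, as the metric tests above do.
with patch(f"{__name__}.load_big_model") as mock_load:
    mock_load.return_value.predict = lambda t: 1.0
    assert score(["a", "b"]) == [1.0, 1.0]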
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): __lowerCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowerCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = TextaTextGenerationPipeline(model=_snake_case , tokenizer=_snake_case ) return generator, ["Something to write", "Something else"] def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = generator("""Something there""" ) self.assertEqual(_snake_case , [{"""generated_text""": ANY(_snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _lowerCAmelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_snake_case ) self.assertEqual( _snake_case , [ [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], ] , ) _lowerCAmelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case ) self.assertEqual( _snake_case , [ [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], ] , ) with self.assertRaises(_snake_case ): generator(4 ) @require_torch def snake_case ( self ): """simple docstring""" _lowerCAmelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _lowerCAmelCase = generator("""Something there""" , do_sample=_snake_case ) self.assertEqual(_snake_case , [{"""generated_text""": """"""}] ) _lowerCAmelCase = 3 _lowerCAmelCase = generator( """Something there""" , num_return_sequences=_snake_case , num_beams=_snake_case , ) _lowerCAmelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_snake_case , _snake_case ) _lowerCAmelCase = generator("""This is a test""" , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case ) self.assertEqual( _snake_case , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _lowerCAmelCase = generator.model.config.eos_token_id _lowerCAmelCase = """<pad>""" _lowerCAmelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , ) self.assertEqual( _snake_case , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def snake_case ( self ): """simple docstring""" _lowerCAmelCase = 
pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _lowerCAmelCase = generator("""Something there""" , do_sample=_snake_case ) self.assertEqual(_snake_case , [{"""generated_text""": """"""}] )
82
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class SCREAMING_SNAKE_CASE__ : def __init__( self , a , a=13 , a=7 , a=False , a=True , a=False , a=False , a=19 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ): lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : Union[str, Any] = seq_length lowercase__ : Optional[Any] = is_training lowercase__ : Tuple = use_input_mask lowercase__ : List[str] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : List[str] = vocab_size lowercase__ : Optional[int] = hidden_size lowercase__ : List[str] = num_hidden_layers lowercase__ : Any = num_attention_heads lowercase__ : int = intermediate_size lowercase__ : Any = hidden_act lowercase__ : Any = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[Any] = type_sequence_label_size lowercase__ : str = initializer_range lowercase__ : List[str] = num_labels lowercase__ : Union[str, Any] = num_choices lowercase__ : Optional[int] = scope def snake_case_ ( self): lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : List[Any] = None if self.use_input_mask: lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : int = None lowercase__ : Optional[int] = None lowercase__ : Optional[int] = None if self.use_labels: lowercase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ : str = ids_tensor([self.batch_size] , self.num_choices) lowercase__ : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ ( self): lowercase__ : str = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def snake_case_ ( self , a , a , a , a , a , a): lowercase__ : Dict = EsmForProteinFolding(config=a).float() model.to(a) model.eval() lowercase__ : Union[str, Any] = model(a , attention_mask=a) lowercase__ : Dict = model(a) lowercase__ : int = model(a) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3)) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2)) def snake_case_ ( self): lowercase__ : List[str] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( 
lowercase__ ) , ( lowercase__ ) , ) : int = config_and_inputs lowercase__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , unittest.TestCase ): __lowerCamelCase : Dict = False __lowerCamelCase : Dict = (EsmForProteinFolding,) if is_torch_available() else () __lowerCamelCase : Union[str, Any] = () __lowerCamelCase : List[Any] = {} if is_torch_available() else {} __lowerCamelCase : Optional[Any] = False def snake_case_ ( self): lowercase__ : Tuple = EsmFoldModelTester(self) lowercase__ : List[Any] = ConfigTester(self , config_class=a , hidden_size=37) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) @unittest.skip('Does not support attention outputs') def snake_case_ ( self): pass @unittest.skip def snake_case_ ( self): pass @unittest.skip('Esm does not support embedding resizing') def snake_case_ ( self): pass @unittest.skip('Esm does not support embedding resizing') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support passing input embeds!') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support head pruning.') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support head pruning.') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support head pruning.') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support head pruning.') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support head pruning.') def snake_case_ ( self): pass @unittest.skip('ESMFold does not output hidden states in the normal way.') def snake_case_ ( self): pass @unittest.skip('ESMfold does not output hidden states in the normal way.') def snake_case_ ( self): pass @unittest.skip('ESMFold only has one output format.') def snake_case_ ( self): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality') def snake_case_ ( self): pass @unittest.skip('ESMFold does not support input chunking.') def snake_case_ ( self): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.') def snake_case_ ( self): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.') def snake_case_ ( self): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.') def snake_case_ ( self): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.') def snake_case_ ( self): pass @unittest.skip('ESMFold doesn\'t support data parallel.') def snake_case_ ( self): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def snake_case_ ( self): pass @require_torch class SCREAMING_SNAKE_CASE__ (__snake_case ): @slow def snake_case_ ( self): lowercase__ : Dict = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float() model.eval() lowercase__ : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) lowercase__ : Optional[int] = model(a)['positions'] lowercase__ : Dict = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , a , atol=1e-4))
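# Hedged inference sketch for EsmForProteinFolding, mirroring the integration
# test above. Assumes the "facebook/esmfold_v1" checkpoint is downloadable and
# fits in memory; the protein sequence is an arbitrary illustrative example.
import torch
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()

inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
with torch.no_grad():
    outputs = model(**inputs)
# outputs["positions"] is shaped (num_recycles, batch, seq_len, 14, 3),
# matching the shape assertions in the model tester above.
print(outputs["positions"].shape)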
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py SCREAMING_SNAKE_CASE : Any = """src/transformers""" SCREAMING_SNAKE_CASE : Dict = """docs/source/en""" SCREAMING_SNAKE_CASE : List[str] = """.""" def lowercase ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple ) ->Optional[Any]: """simple docstring""" with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case : List[Any] = f.readlines() # Find the start prompt. __snake_case : Dict = 0 while not lines[start_index].startswith(_snake_case ): start_index += 1 start_index += 1 __snake_case : Optional[int] = start_index while not lines[end_index].startswith(_snake_case ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | SCREAMING_SNAKE_CASE : Optional[int] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowercase ( _snake_case : Any ) ->Optional[int]: """simple docstring""" __snake_case : str = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _snake_case ) return [m.group(0 ) for m in matches] def lowercase ( _snake_case : Dict , _snake_case : List[str] ) ->List[Any]: """simple docstring""" __snake_case : Optional[int] = 2 if text == '''✅''' or text == '''❌''' else len(_snake_case ) __snake_case : str = (width - text_length) // 2 __snake_case : List[str] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __snake_case : Optional[int] = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __snake_case : Union[str, Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __snake_case : List[Any] = collections.defaultdict(_snake_case ) __snake_case : List[Any] = collections.defaultdict(_snake_case ) __snake_case : Optional[Any] = collections.defaultdict(_snake_case ) __snake_case : Optional[int] = collections.defaultdict(_snake_case ) __snake_case : Any = collections.defaultdict(_snake_case ) # Let's lookup through all transformers object (once). 
for attr_name in dir(_snake_case ): __snake_case : List[str] = None if attr_name.endswith('''Tokenizer''' ): __snake_case : Tuple = slow_tokenizers __snake_case : Union[str, Any] = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __snake_case : List[Any] = fast_tokenizers __snake_case : Optional[int] = attr_name[:-13] elif _re_tf_models.match(_snake_case ) is not None: __snake_case : List[Any] = tf_models __snake_case : int = _re_tf_models.match(_snake_case ).groups()[0] elif _re_flax_models.match(_snake_case ) is not None: __snake_case : Tuple = flax_models __snake_case : Optional[int] = _re_flax_models.match(_snake_case ).groups()[0] elif _re_pt_models.match(_snake_case ) is not None: __snake_case : Optional[Any] = pt_models __snake_case : List[Any] = _re_pt_models.match(_snake_case ).groups()[0] if lookup_dict is not None: while len(_snake_case ) > 0: if attr_name in model_name_to_prefix.values(): __snake_case : Optional[int] = True break # Try again after removing the last word in the name __snake_case : Dict = ''''''.join(camel_case_split(_snake_case )[:-1] ) # Let's build that table! __snake_case : int = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __snake_case : List[Any] = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __snake_case : int = [len(_snake_case ) + 2 for c in columns] __snake_case : List[Any] = max([len(_snake_case ) for name in model_names] ) + 2 # Build the table per se __snake_case : Optional[int] = '''|''' + '''|'''.join([_center_text(_snake_case , _snake_case ) for c, w in zip(_snake_case , _snake_case )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __snake_case : Optional[int] = {True: '''✅''', False: '''❌'''} for name in model_names: __snake_case : Optional[Any] = model_name_to_prefix[name] __snake_case : Optional[Any] = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_snake_case , _snake_case ) for l, w in zip(_snake_case , _snake_case )] ) + "|\n" return table def lowercase ( _snake_case : List[str]=False ) ->Dict: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : int = _find_text_in_file( filename=os.path.join(_snake_case , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) __snake_case : Optional[Any] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_snake_case , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() check_model_table(args.fix_and_overwrite)
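# Quick sanity check for the camel-case splitting regex used above
# (`camel_case_split` is the name at the call sites in the table builder):
#
# >>> camel_case_split("TFBertForConditionalGeneration")
# ['TF', 'Bert', 'For', 'Conditional', 'Generation']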
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: A_ = None A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''', }, } A_ = { '''google/fnet-base''': 5_12, '''google/fnet-large''': 5_12, } A_ = '''▁''' class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "token_type_ids"] lowercase__ = FNetTokenizer def __init__( self: List[str], a_: str=None, a_: Optional[Any]=None, a_: Tuple=False, a_: Any=True, a_: List[str]=True, a_: List[Any]="<unk>", a_: Optional[Any]="[SEP]", a_: Optional[int]="<pad>", a_: Optional[Any]="[CLS]", a_: int="[MASK]", **a_: Optional[Any], ): '''simple docstring''' _snake_case : str = ( AddedToken(a_, lstrip=a_, rstrip=a_, normalized=a_ ) if isinstance(a_, a_ ) else mask_token ) super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, remove_space=a_, keep_accents=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, **a_, ) _snake_case : Union[str, Any] = do_lower_case _snake_case : Dict = remove_space _snake_case : int = keep_accents _snake_case : Dict = vocab_file _snake_case : str = False if not self.vocab_file else True def UpperCamelCase_ ( self: Optional[Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[Any] = [self.sep_token_id] _snake_case : Optional[int] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self: Union[str, Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Any = [self.sep_token_id] _snake_case : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: List[str], a_: str, a_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : List[Any] = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file, a_ ) return (out_vocab_file,)
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = '''sample''' snake_case_ = 1E-2 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**_snake_case ) model.to(_snake_case ) assert not model.is_gradient_checkpointing and model.training __a = model(**_snake_case ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(_snake_case ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**_snake_case ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_snake_case ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**_snake_case ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __a = model.to(_snake_case ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=_snake_case ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) ) @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any: '''simple docstring''' __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]: '''simple docstring''' __a = '''fp16''' if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , ) model.to(_snake_case ).eval() return model def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(_snake_case ) return torch.Generator(device=_snake_case ).manual_seed(_snake_case ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 
0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , fpaa=_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = 
model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model.encode(_snake_case ).latent_dist __a = dist.sample(generator=_snake_case ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(_snake_case ) __a = 3E-3 if torch_device != '''mps''' else 1E-2 assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
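# Minimal encode/decode round trip with the VAE class under test (a hedged
# sketch using the same tiny "fusing/autoencoder-kl-dummy" checkpoint as the
# tests above).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)  # latents are spatially downsampled, 4 channels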
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: snake_case_ = inspect.getfile(accelerate.test_utils) snake_case_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py']) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 snake_case_ = test_metrics @require_cpu def a_ ( self) -> Any: debug_launcher(self.test_metrics.main, num_processes=1) @require_cpu def a_ ( self) -> List[Any]: debug_launcher(self.test_metrics.main) @require_single_gpu def a_ ( self) -> int: self.test_metrics.main() @require_multi_gpu def a_ ( self) -> Optional[int]: print(f'Found {torch.cuda.device_count()} devices.') snake_case_ = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(lowerCAmelCase__, env=os.environ.copy())
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
'''TrajectoryTransformer model configuration.'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class lowercase__ ( _snake_case ): '''simple docstring''' A_ : str = """big_bird""" def __init__( self , __snake_case=5_0358 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu_new" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=4096 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=True , __snake_case=0 , __snake_case=1 , __snake_case=2 , __snake_case=66 , __snake_case="block_sparse" , __snake_case=True , __snake_case=False , __snake_case=64 , __snake_case=3 , __snake_case=None , **__snake_case , ): super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , sep_token_id=__snake_case , **__snake_case , ) _SCREAMING_SNAKE_CASE : str = vocab_size _SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : List[str] = hidden_size _SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers _SCREAMING_SNAKE_CASE : Any = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size _SCREAMING_SNAKE_CASE : List[Any] = hidden_act _SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Tuple = initializer_range _SCREAMING_SNAKE_CASE : Any = type_vocab_size _SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : List[Any] = use_cache _SCREAMING_SNAKE_CASE : List[Any] = rescale_embeddings _SCREAMING_SNAKE_CASE : Union[str, Any] = attention_type _SCREAMING_SNAKE_CASE : Union[str, Any] = use_bias _SCREAMING_SNAKE_CASE : int = block_size _SCREAMING_SNAKE_CASE : Any = num_random_blocks _SCREAMING_SNAKE_CASE : List[str] = classifier_dropout class lowercase__ ( _snake_case ): '''simple docstring''' @property def UpperCAmelCase_ ( self ): if self.task == "multiple-choice": _SCREAMING_SNAKE_CASE : int = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
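# Hedged instantiation sketch for the configuration above; the values passed
# here simply make the documented defaults explicit.
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.attention_type, config.block_size, config.num_random_blocks)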
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase : List[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , **_a ): """simple docstring""" super().__init__(**_a ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , _a , **_a ): """simple docstring""" return super().__call__(_a , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" lowerCamelCase = {} if "candidate_labels" in kwargs: lowerCamelCase = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: lowerCamelCase = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowerCAmelCase ( self , _a , _a=None , _a="This is a photo of {}." ): """simple docstring""" lowerCamelCase = load_image(_a ) lowerCamelCase = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCamelCase = candidate_labels lowerCamelCase = [hypothesis_template.format(_a ) for x in candidate_labels] lowerCamelCase = self.tokenizer(_a , return_tensors=self.framework , padding=_a ) lowerCamelCase = [text_inputs] return inputs def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = model_inputs.pop("""candidate_labels""" ) lowerCamelCase = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , _a ): lowerCamelCase = text_inputs[0] else: # Batching case. lowerCamelCase = text_inputs[0][0] lowerCamelCase = self.model(**_a , **_a ) lowerCamelCase = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = model_outputs.pop("""candidate_labels""" ) lowerCamelCase = model_outputs["""logits"""][0] if self.framework == "pt": lowerCamelCase = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCamelCase = probs.tolist() if not isinstance(_a , _a ): lowerCamelCase = [scores] elif self.framework == "tf": lowerCamelCase = stable_softmax(_a , axis=-1 ) lowerCamelCase = probs.numpy().tolist() else: raise ValueError(f'Unsupported framework: {self.framework}' ) lowerCamelCase = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] ) ] return result
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase : Dict = 16 lowerCAmelCase : int = 32 def a__ ( snake_case__ ) -> Optional[Any]: return int(x / 2**20 ) class __magic_name__ : '''simple docstring''' def __enter__( self ): """simple docstring""" gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCamelCase = torch.cuda.memory_allocated() return self def __exit__( self , *_a ): """simple docstring""" gc.collect() torch.cuda.empty_cache() lowerCamelCase = torch.cuda.memory_allocated() lowerCamelCase = torch.cuda.max_memory_allocated() lowerCamelCase = bamb(self.end - self.begin ) lowerCamelCase = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a__ ( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" , snake_case__ = 3_20 , snake_case__ = 1_60 , ) -> List[str]: lowerCamelCase = AutoTokenizer.from_pretrained(snake_case__ ) lowerCamelCase = load_dataset( """glue""" , """mrpc""" , split={"""train""": F'train[:{n_train}]', """validation""": F'validation[:{n_val}]'} ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def a__ ( snake_case__ , snake_case__ ) -> Any: # Initialize accelerator lowerCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase = config["""lr"""] lowerCamelCase = int(config["""num_epochs"""] ) lowerCamelCase = int(config["""seed"""] ) lowerCamelCase = int(config["""batch_size"""] ) lowerCamelCase = args.model_name_or_path set_seed(snake_case__ ) lowerCamelCase , lowerCamelCase = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer lowerCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCamelCase = 1 lowerCamelCase = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: lowerCamelCase = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over lowerCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly lowerCamelCase = 0 # Now we train the model lowerCamelCase = {} for epoch in range(snake_case__ , snake_case__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(snake_case__ ): lowerCamelCase = model(**snake_case__ ) lowerCamelCase = outputs.loss lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) ) accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) ) accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) ) accelerator.print( """Total Peak Memory consumed during the train (max): {}""".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f: json.dump(snake_case__ , snake_case__ ) def a__ ( ) -> str: lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , ) parser.add_argument( """--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--peak_memory_upper_bound""" , type=snake_case__ , default=snake_case__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , ) parser.add_argument( """--n_train""" , type=snake_case__ , default=3_20 , help="""Number of training examples to use.""" , ) parser.add_argument( """--n_val""" , type=snake_case__ , default=1_60 , help="""Number of validation examples to use.""" , ) parser.add_argument( """--num_epochs""" , type=snake_case__ , default=1 , help="""Number of train epochs.""" , ) lowerCamelCase = parser.parse_args() lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
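# The TorchTracemalloc context manager defined above is reusable on its own; a
# hedged sketch of measuring one training step (`train_step` is a placeholder):
#
# with TorchTracemalloc() as tracemalloc:
#     train_step()
# print(tracemalloc.used, tracemalloc.peaked)  # MB consumed / MB peaked in the block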
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) UpperCAmelCase : Tuple = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , __magic_name__ ) if matches: UpperCAmelCase : int = float(matches[1] ) UpperCAmelCase : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". UpperCAmelCase : str = 1001 UpperCAmelCase : int = "imagenet-1k-id2label.json" UpperCAmelCase : Tuple = "huggingface/label-files" UpperCAmelCase : List[str] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : Optional[int] = {int(__magic_name__ ) + 1: v for k, v in idalabel.items()} UpperCAmelCase : Optional[int] = "background" UpperCAmelCase : int = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Union[str, Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ): '''simple docstring''' UpperCAmelCase : Optional[Any] = get_mobilenet_va_config(__magic_name__ ) # Load 🤗 model UpperCAmelCase : Tuple = MobileNetVaForImageClassification(__magic_name__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__magic_name__ , __magic_name__ , __magic_name__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor UpperCAmelCase : Optional[int] = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , ) UpperCAmelCase : Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : Union[str, Any] = model(**__magic_name__ ) UpperCAmelCase : Tuple = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": UpperCAmelCase : int = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": UpperCAmelCase : Tuple = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: UpperCAmelCase : Optional[int] = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: print("Pushing to the hub..." 
) UpperCAmelCase : Tuple = "google/" + model_name image_processor.push_to_hub(__magic_name__ ) model.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : Optional[Any] = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
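# Example invocation of the conversion script above (hedged: the script
# filename and checkpoint path are placeholders):
#
# python convert_mobilenet_v1_to_pytorch.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224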
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[Any] = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class __lowercase (UpperCamelCase__ ): """simple docstring""" _snake_case = """marian""" _snake_case = ["""past_key_values"""] _snake_case = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , A=5_8_1_0_1 , A=None , A=1_0_2_4 , A=1_2 , A=4_0_9_6 , A=1_6 , A=1_2 , A=4_0_9_6 , A=1_6 , A=0.0 , A=0.0 , A=True , A=True , A="gelu" , A=1_0_2_4 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=5_8_1_0_0 , A=False , A=5_8_1_0_0 , A=0 , A=0 , A=True , **A , ) -> Optional[int]: snake_case : str = vocab_size snake_case : Optional[int] = decoder_vocab_size or vocab_size snake_case : Optional[Any] = max_position_embeddings snake_case : Optional[Any] = d_model snake_case : Optional[Any] = encoder_ffn_dim snake_case : int = encoder_layers snake_case : Tuple = encoder_attention_heads snake_case : str = decoder_ffn_dim snake_case : Tuple = decoder_layers snake_case : str = decoder_attention_heads snake_case : List[Any] = dropout snake_case : str = attention_dropout snake_case : Union[str, Any] = activation_dropout snake_case : str = activation_function snake_case : Optional[int] = init_std snake_case : List[str] = encoder_layerdrop snake_case : Optional[int] = decoder_layerdrop snake_case : Dict = use_cache snake_case : Dict = encoder_layers snake_case : Dict = scale_embedding # scale factor will be sqrt(d_model) if True snake_case : Dict = share_encoder_decoder_embeddings super().__init__( pad_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , ) class __lowercase (UpperCamelCase__ ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: snake_case : Optional[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: snake_case : str = {0: """batch"""} snake_case : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: snake_case : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""} snake_case : List[Any] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(A , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
snake_case : Tuple = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: snake_case , snake_case : Any = self.num_layers for i in range(A ): snake_case : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""} snake_case : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} else: snake_case : Optional[int] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: snake_case : Optional[Any] = super().outputs else: snake_case : Any = super(A , self ).outputs if self.use_past: snake_case , snake_case : Any = self.num_layers for i in range(A ): snake_case : List[str] = {0: """batch""", 2: """past_sequence + sequence"""} snake_case : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: snake_case : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder( A , A , A , A , A ) # Generate decoder inputs snake_case : Union[str, Any] = seq_length if not self.use_past else 1 snake_case : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder( A , A , A , A , A ) snake_case : str = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} snake_case : Tuple = dict(**A , **A ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch snake_case , snake_case : Optional[Any] = common_inputs["""input_ids"""].shape snake_case : str = common_inputs["""decoder_input_ids"""].shape[1] snake_case , snake_case : Union[str, Any] = self.num_attention_heads snake_case : Optional[int] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case : Union[str, Any] = decoder_seq_length + 3 snake_case : Optional[int] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) snake_case : Tuple = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(A , A )] , dim=1 ) snake_case : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered snake_case , snake_case : str = self.num_layers snake_case : Tuple = min(A , A ) snake_case : Tuple = max(A , A ) - min_num_layers snake_case : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(A ): common_inputs["past_key_values"].append( ( torch.zeros(A ), torch.zeros(A ), torch.zeros(A ), torch.zeros(A ), ) ) # TODO: test this. 
snake_case : Optional[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(A , A ): common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) ) return common_inputs def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: snake_case : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder( A , A , A , A , A ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch snake_case , snake_case : Tuple = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values snake_case : Optional[Any] = seqlen + 2 snake_case , snake_case : int = self.num_layers snake_case , snake_case : Optional[Any] = self.num_attention_heads snake_case : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case : Dict = common_inputs["""attention_mask"""].dtype snake_case : Union[str, Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 ) snake_case : int = [ (torch.zeros(A ), torch.zeros(A )) for _ in range(A ) ] return common_inputs def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX snake_case : Tuple = compute_effective_axis_dimension( A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX snake_case : Any = tokenizer.num_special_tokens_to_add(A ) snake_case : Any = compute_effective_axis_dimension( A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A ) # Generate dummy inputs according to compute batch and sequence snake_case : Optional[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size snake_case : Union[str, Any] = dict(tokenizer(A , return_tensors=A ) ) return common_inputs def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: snake_case : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A , batch_size=A , seq_length=A , is_pair=A , framework=A ) else: snake_case : List[Any] = self._generate_dummy_inputs_for_causal_lm( A , batch_size=A , seq_length=A , is_pair=A , framework=A ) return common_inputs def UpperCAmelCase ( self , A , A , A , A ) -> List[Any]: if self.task in ["default", "seq2seq-lm"]: snake_case : str = super()._flatten_past_key_values_(A , A , A , A ) else: snake_case : Optional[int] = super(A , self )._flatten_past_key_values_( A , A , A , A ) @property def UpperCAmelCase ( self ) -> float: return 1e-4
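The dummy-input generators above allocate one past key/value tensor pair per layer with shape (batch, num_heads, past_length, hidden_size // num_heads). A minimal sketch of that shape arithmetic, with illustrative numbers rather than a real Marian checkpoint:

# Sketch only: the past_key_values shape convention used by the ONNX config above.
import torch

batch, num_heads, d_model, decoder_seq_length = 2, 8, 512, 7
decoder_past_length = decoder_seq_length + 3  # matches the "+ 3" padding above
shape = (batch, num_heads, decoder_past_length, d_model // num_heads)

past_key = torch.zeros(shape)    # one key tensor per decoder layer
past_value = torch.zeros(shape)  # one value tensor per decoder layer
print(past_key.shape)  # torch.Size([2, 8, 10, 64])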
176
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    # Wrap `fn` so that every call warns about its experimental status.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
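A quick usage sketch for the decorator above (the decorated function is illustrative):

@experimental
def new_feature(x):
    return x * 2

new_feature(3)  # returns 6 and emits "'new_feature' is experimental and might be subject to breaking changes in the future."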
176
1
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
263
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowerCAmelCase :str = object() # For specifying empty leaf dict `{}` _lowerCAmelCase :str = object() def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def lowerCamelCase_ (UpperCamelCase__ : List[str] ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def lowerCamelCase_ (): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = _get_partition_rules() _UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
263
1
"""simple docstring""" from typing import Any def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): _validation( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) # Creates data structures and fill initial step UpperCAmelCase = {} UpperCAmelCase = {} for state in states_space: UpperCAmelCase = observations_space[0] UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(lowercase_ ) ): UpperCAmelCase = observations_space[o] UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function UpperCAmelCase = '' UpperCAmelCase = -1 for k_state in states_space: UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: UpperCAmelCase = probability UpperCAmelCase = k_state # Update probabilities and pointers dicts UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) UpperCAmelCase = arg_max # The final observation UpperCAmelCase = observations_space[len(lowercase_ ) - 1] # argmax for given final observation UpperCAmelCase = '' UpperCAmelCase = -1 for k_state in states_space: UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: UpperCAmelCase = probability UpperCAmelCase = k_state UpperCAmelCase = arg_max # Process pointers backwards UpperCAmelCase = last_state UpperCAmelCase = [] for o in range(len(lowercase_ ) - 1 , -1 , -1 ): result.append(lowercase_ ) UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): _validate_not_empty( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) _validate_lists(lowercase_ , lowercase_ ) _validate_dicts( lowercase_ , lowercase_ , lowercase_ ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('There\'s an empty parameter' ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): _validate_list(lowercase_ , 'observations_space' ) _validate_list(lowercase_ , 'states_space' ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): if not isinstance(_object , lowercase_ ): UpperCAmelCase = F"""{var_name} must be a list""" raise ValueError(lowercase_ ) else: for x in _object: if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = F"""{var_name} must be a list of strings""" raise ValueError(lowercase_ ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , ): _validate_dict(lowercase_ , 'initial_probabilities' , lowercase_ ) _validate_nested_dict(lowercase_ , 'transition_probabilities' ) _validate_nested_dict(lowercase_ , 'emission_probabilities' ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): _validate_dict(_object , lowercase_ , lowercase_ ) for x in _object.values(): _validate_dict(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = False ): if not isinstance(_object , lowercase_ ): UpperCAmelCase = F"""{var_name} 
must be a dict""" raise ValueError(lowercase_ ) if not all(isinstance(lowercase_ , lowercase_ ) for x in _object ): UpperCAmelCase = F"""{var_name} all keys must be strings""" raise ValueError(lowercase_ ) if not all(isinstance(lowercase_ , lowercase_ ) for x in _object.values() ): UpperCAmelCase = 'nested dictionary ' if nested else '' UpperCAmelCase = F"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(lowercase_ ) if __name__ == "__main__": from doctest import testmod testmod()
181
"""simple docstring""" # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
181
1
'''simple docstring'''

import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
37
'''simple docstring'''

from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of ``n`` by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
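Usage sketch:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # primes factor as themselves
assert prime_factors(1) == []     # 1 has no prime factors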
161
0
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __A = datasets.logging.get_logger(__name__) __A = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' __A = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' __A = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' __A = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase ( datasets.Metric): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Dict ) -> List[Any]: # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) UpperCAmelCase_= """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: UpperCAmelCase_= self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: UpperCAmelCase_= self.config_name.upper() else: raise KeyError( F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer UpperCAmelCase_= dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) UpperCAmelCase_= score.BleurtScorer(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] ) -> str: UpperCAmelCase_= self.scorer.score(references=__UpperCAmelCase , candidates=__UpperCAmelCase ) return {"scores": scores}
277
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __A = datasets.utils.logging.get_logger(__name__) __A = ['''names''', '''prefix'''] __A = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __A = ['''encoding_errors''', '''on_bad_lines'''] __A = ['''date_format'''] @dataclass class lowercase ( datasets.BuilderConfig): """simple docstring""" a__ : str = "," a__ : Optional[str] = None a__ : Optional[Union[int, List[int], str]] = "infer" a__ : Optional[List[str]] = None a__ : Optional[List[str]] = None a__ : Optional[Union[int, str, List[int], List[str]]] = None a__ : Optional[Union[List[int], List[str]]] = None a__ : Optional[str] = None a__ : bool = True a__ : Optional[Literal["c", "python", "pyarrow"]] = None a__ : Dict[Union[int, str], Callable[[Any], Any]] = None a__ : Optional[list] = None a__ : Optional[list] = None a__ : bool = False a__ : Optional[Union[int, List[int]]] = None a__ : Optional[int] = None a__ : Optional[Union[str, List[str]]] = None a__ : bool = True a__ : bool = True a__ : bool = False a__ : bool = True a__ : Optional[str] = None a__ : str = "." a__ : Optional[str] = None a__ : str = '"' a__ : int = 0 a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : bool = True a__ : bool = True a__ : int = 0 a__ : bool = True a__ : bool = False a__ : Optional[str] = None a__ : int = 1_0000 a__ : Optional[datasets.Features] = None a__ : Optional[str] = "strict" a__ : Literal["error", "warn", "skip"] = "error" a__ : Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: if self.delimiter is not None: UpperCAmelCase_= self.delimiter if self.column_names is not None: UpperCAmelCase_= self.column_names @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: UpperCAmelCase_= { """sep""": self.sep, """header""": self.header, """names""": self.names, """index_col""": self.index_col, """usecols""": self.usecols, """prefix""": self.prefix, """mangle_dupe_cols""": self.mangle_dupe_cols, """engine""": self.engine, """converters""": self.converters, """true_values""": self.true_values, """false_values""": self.false_values, """skipinitialspace""": self.skipinitialspace, """skiprows""": self.skiprows, """nrows""": self.nrows, """na_values""": self.na_values, """keep_default_na""": self.keep_default_na, """na_filter""": self.na_filter, """verbose""": self.verbose, """skip_blank_lines""": self.skip_blank_lines, """thousands""": self.thousands, """decimal""": self.decimal, """lineterminator""": self.lineterminator, """quotechar""": self.quotechar, """quoting""": self.quoting, """escapechar""": self.escapechar, """comment""": self.comment, """encoding""": self.encoding, """dialect""": self.dialect, """error_bad_lines""": self.error_bad_lines, """warn_bad_lines""": self.warn_bad_lines, """skipfooter""": self.skipfooter, """doublequote""": self.doublequote, """memory_map""": self.memory_map, """float_precision""": self.float_precision, """chunksize""": self.chunksize, """encoding_errors""": self.encoding_errors, """on_bad_lines""": self.on_bad_lines, """date_format""": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass 
them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __UpperCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowercase ( datasets.ArrowBasedBuilder): """simple docstring""" a__ : int = CsvConfig def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict ) -> Optional[int]: if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase_= dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCAmelCase , (str, list, tuple) ): UpperCAmelCase_= data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] UpperCAmelCase_= [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"""files""": files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : pa.Table ) -> pa.Table: if self.config.features is not None: UpperCAmelCase_= self.config.features.arrow_schema if all(not require_storage_cast(__UpperCAmelCase ) for feature in self.config.features.values() ): # cheaper cast UpperCAmelCase_= pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__UpperCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCAmelCase_= table_cast(__UpperCAmelCase , __UpperCAmelCase ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[Any] ) -> List[str]: UpperCAmelCase_= self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCAmelCase_= ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__UpperCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase ) ): UpperCAmelCase_= pd.read_csv(__UpperCAmelCase , iterator=__UpperCAmelCase , dtype=__UpperCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__UpperCAmelCase ): UpperCAmelCase_= pa.Table.from_pandas(__UpperCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), 
self._cast_table(__UpperCAmelCase ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" ) raise
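This builder is what backs CSV loading in `datasets`; a hedged usage sketch (the file name is illustrative), where extra keyword arguments become the `CsvConfig` fields above and are forwarded to `pandas.read_csv`:

from datasets import load_dataset

# sep, column_names, skiprows, ... travel through CsvConfig.pd_read_csv_kwargs
ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
print(ds["train"][0])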
277
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[Any] = logging.get_logger(__name__) a : str = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class a ( lowercase__ ): """simple docstring""" a : Optional[Any] = 'pix2struct_text_model' a : Union[str, Any] = ['past_key_values'] a : Optional[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str , __lowercase : List[str]=50244 , __lowercase : Optional[int]=768 , __lowercase : Tuple=64 , __lowercase : Union[str, Any]=2048 , __lowercase : List[Any]=12 , __lowercase : Optional[Any]=12 , __lowercase : Union[str, Any]=32 , __lowercase : Tuple=128 , __lowercase : Any=0.1 , __lowercase : Optional[int]=1e-6 , __lowercase : Tuple=1.0 , __lowercase : str="gelu_new" , __lowercase : Optional[Any]=0 , __lowercase : Any=False , __lowercase : Any=0 , __lowercase : Tuple=1 , __lowercase : Dict=False , __lowercase : Optional[int]=True , **__lowercase : List[str] , ) -> Tuple: __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : List[Any] = hidden_size __UpperCAmelCase : List[Any] = d_kv __UpperCAmelCase : Optional[Any] = d_ff __UpperCAmelCase : Any = num_layers __UpperCAmelCase : int = num_heads __UpperCAmelCase : Optional[Any] = relative_attention_num_buckets __UpperCAmelCase : List[str] = relative_attention_max_distance __UpperCAmelCase : int = dropout_rate __UpperCAmelCase : List[Any] = layer_norm_epsilon __UpperCAmelCase : Dict = initializer_factor __UpperCAmelCase : Any = use_cache __UpperCAmelCase : Tuple = eos_token_id __UpperCAmelCase : int = decoder_start_token_id # for backwards compatibility __UpperCAmelCase : str = dense_act_fn super().__init__( pad_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , tie_word_embeddings=__lowercase , is_decoder=__lowercase , **__lowercase , ) @classmethod def UpperCAmelCase ( cls : List[Any] , __lowercase : Union[str, os.PathLike] , **__lowercase : Tuple ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowercase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = cls.get_config_dict(__lowercase , **__lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": __UpperCAmelCase : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowercase , **__lowercase ) class a ( lowercase__ ): """simple docstring""" a : str = 'pix2struct_vision_model' def __init__( self : Tuple , __lowercase : Tuple=768 , __lowercase : Union[str, Any]=768 , __lowercase : List[Any]=2048 , __lowercase : Dict=64 , __lowercase : Tuple=12 , __lowercase : Union[str, Any]=12 , __lowercase : List[Any]="gelu_new" , __lowercase : Tuple=1e-6 , __lowercase : Any=0.0 , __lowercase : Dict=0.0 , __lowercase : int=1e-1_0 , __lowercase : List[Any]=1.0 , __lowercase : Optional[int]=4096 , __lowercase : str=32 , __lowercase : str=128 , **__lowercase : Any , ) -> Optional[Any]: super().__init__(**__lowercase ) __UpperCAmelCase : List[Any] = hidden_size __UpperCAmelCase : Dict = patch_embed_hidden_size __UpperCAmelCase : Optional[int] = d_ff __UpperCAmelCase : Optional[int] = dropout_rate __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : int = initializer_range __UpperCAmelCase : Dict = initializer_factor __UpperCAmelCase : int = attention_dropout __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : Union[str, Any] = dense_act_fn __UpperCAmelCase : str = seq_len __UpperCAmelCase : List[str] = relative_attention_num_buckets __UpperCAmelCase : str = relative_attention_max_distance __UpperCAmelCase : Tuple = d_kv @classmethod def UpperCAmelCase ( cls : Optional[int] , __lowercase : Union[str, os.PathLike] , **__lowercase : int ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowercase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = cls.get_config_dict(__lowercase , **__lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": __UpperCAmelCase : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowercase , **__lowercase ) class a ( lowercase__ ): """simple docstring""" a : List[Any] = 'pix2struct' a : Optional[int] = True def __init__( self : List[Any] , __lowercase : Dict=None , __lowercase : Dict=None , __lowercase : Dict=1.0 , __lowercase : Optional[int]=0.02 , __lowercase : Dict=False , __lowercase : int=False , __lowercase : List[Any]=True , **__lowercase : Dict , ) -> Dict: super().__init__(tie_word_embeddings=__lowercase , is_encoder_decoder=__lowercase , **__lowercase ) if text_config is None: __UpperCAmelCase : str = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: __UpperCAmelCase : str = {} logger.info("""vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.""" ) __UpperCAmelCase : str = PixaStructTextConfig(**__lowercase ) __UpperCAmelCase : Any = PixaStructVisionConfig(**__lowercase ) __UpperCAmelCase : List[str] = self.text_config.decoder_start_token_id __UpperCAmelCase : Tuple = self.text_config.pad_token_id __UpperCAmelCase : str = self.text_config.eos_token_id __UpperCAmelCase : str = initializer_factor __UpperCAmelCase : List[str] = initializer_range __UpperCAmelCase : Tuple = self.initializer_range __UpperCAmelCase : Optional[Any] = self.initializer_range __UpperCAmelCase : Dict = is_vqa @classmethod def UpperCAmelCase ( cls : Tuple , __lowercase : PixaStructTextConfig , __lowercase : PixaStructVisionConfig , **__lowercase : Optional[Any] ) -> Optional[Any]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowercase ) def UpperCAmelCase ( self : Tuple ) -> List[str]: __UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : int = self.text_config.to_dict() __UpperCAmelCase : Optional[int] = self.vision_config.to_dict() __UpperCAmelCase : int = self.__class__.model_type return output
114
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a : Union[str, Any] = logging.get_logger(__name__) class a ( lowercase__ ): """simple docstring""" a : Any = ['pixel_values'] def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : int = 0.9 , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : Union[int, float] = 1 / 255 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Any , ) -> None: super().__init__(**__lowercase ) __UpperCAmelCase : Tuple = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : Union[str, Any] = get_size_dict(__lowercase , default_to_square=__lowercase ) __UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Any = get_size_dict(__lowercase , param_name="""crop_size""" ) __UpperCAmelCase : Dict = do_resize __UpperCAmelCase : Dict = size __UpperCAmelCase : Tuple = crop_pct __UpperCAmelCase : List[Any] = resample __UpperCAmelCase : List[Any] = do_center_crop __UpperCAmelCase : List[Any] = crop_size __UpperCAmelCase : Any = do_rescale __UpperCAmelCase : Tuple = rescale_factor __UpperCAmelCase : int = do_normalize __UpperCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase ( self : Tuple , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[float] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> np.ndarray: __UpperCAmelCase : Tuple = get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: __UpperCAmelCase : Union[str, Any] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCAmelCase : Tuple = int(size["""height"""] / crop_pct ) else: __UpperCAmelCase : str = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) ) __UpperCAmelCase : str = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase ) else: if "shortest_edge" in size: __UpperCAmelCase : List[str] = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase ) elif "height" in size and "width" in size: __UpperCAmelCase : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCAmelCase ( self : Dict , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Union[str, Any] , ) -> np.ndarray: __UpperCAmelCase : Optional[Any] = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase ) def UpperCAmelCase ( self : List[str] , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : int , ) -> int: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCAmelCase ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCAmelCase ( self : Any , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : int = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : List[str] , ) -> PIL.Image.Image: __UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Tuple = 
image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : Optional[int] = size if size is not None else self.size __UpperCAmelCase : Dict = get_size_dict(__lowercase , default_to_square=__lowercase ) __UpperCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Tuple = get_size_dict(__lowercase , param_name="""crop_size""" ) __UpperCAmelCase : Dict = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __UpperCAmelCase : str = [to_numpy_array(__lowercase ) for image in images] if do_resize: __UpperCAmelCase : str = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images] if do_center_crop: __UpperCAmelCase : Any = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images] if do_rescale: __UpperCAmelCase : List[str] = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: __UpperCAmelCase : str = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] __UpperCAmelCase : List[str] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
114
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
356
'''simple docstring'''


def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
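For comparison, the standard library yields the same combinations (a hedged equivalence check):

from itertools import combinations

arr = [10, 20, 30, 40, 50]
# itertools produces the same 10 size-3 combinations that print_combination prints
print(list(combinations(arr, 3))[:2])  # [(10, 20, 30), (10, 20, 40)]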
249
0
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
80
'''simple docstring'''

import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
83
0
"""simple docstring""" import random def __lowerCamelCase ( a_ : Union[str, Any] , a_ : str , a_ : Optional[int] ) -> List[Any]: __SCREAMING_SNAKE_CASE :Any = a[left_index] __SCREAMING_SNAKE_CASE :Tuple = left_index + 1 for j in range(left_index + 1 , a_ ): if a[j] < pivot: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = a[i], a[j] i += 1 __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = a[i - 1], a[left_index] return i - 1 def __lowerCamelCase ( a_ : str , a_ : int , a_ : Any ) -> List[str]: if left < right: __SCREAMING_SNAKE_CASE :Optional[Any] = random.randint(a_ , right - 1 ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = ( a[left], a[pivot], ) # switches the pivot with the left most bound __SCREAMING_SNAKE_CASE :Union[str, Any] = partition(a_ , a_ , a_ ) quick_sort_random( a_ , a_ , a_ ) # recursive quicksort to the left of the pivot point quick_sort_random( a_ , pivot_index + 1 , a_ ) # recursive quicksort to the right of the pivot point def __lowerCamelCase ( ) -> Any: __SCREAMING_SNAKE_CASE :Dict = input('''Enter numbers separated by a comma:\n''' ).strip() __SCREAMING_SNAKE_CASE :Optional[int] = [int(a_ ) for item in user_input.split(''',''' )] quick_sort_random(a_ , 0 , len(a_ ) ) print(a_ ) if __name__ == "__main__": main()
239
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCamelCase_ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) lowerCamelCase_ = [] lowerCamelCase_ = [] lowerCamelCase_ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} lowerCamelCase_ = [ { "type": "header", "text": { "type": "plain_text", "text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', "emoji": True, }, } ] lowerCamelCase_ = 0 for log in Path().glob("*.log"): lowerCamelCase_ = 0 with open(log, "r") as f: for line in f: lowerCamelCase_ = json.loads(line) if line.get("nodeid", "") != "": lowerCamelCase_ = line["nodeid"] if line.get("duration", None) is not None: lowerCamelCase_ = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCamelCase_ = [] log.unlink() lowerCamelCase_ = "" lowerCamelCase_ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" lowerCamelCase_ = [] lowerCamelCase_ = {} for test in failed_tests: lowerCamelCase_ = test[0].split("::") lowerCamelCase_ = data[0].split("/")[-1] if data[0] not in filesafailed: lowerCamelCase_ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCamelCase_ = [test[0] for test in failed_table] lowerCamelCase_ = list(set(files)) # Count number of instances in failed_tests lowerCamelCase_ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCamelCase_ = tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: lowerCamelCase_ = "Too many failed tests, please see the full report in the Action results." lowerCamelCase_ = len(err) + 1_0 lowerCamelCase_ = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}' print(f'### {message}') else: lowerCamelCase_ = "No failed tests! 🤗" print(f'## {message}') payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient lowerCamelCase_ = WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": lowerCamelCase_ = { "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) lowerCamelCase_ = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) lowerCamelCase_ = { "type": "context", "elements": [ { "type": "plain_text", "text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) lowerCamelCase_ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) lowerCamelCase_ = response.data["ts"] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCamelCase_ = "" for i, row in enumerate(test_failures): if row[0] != test_class: lowerCamelCase_ = row[0] else: lowerCamelCase_ = "" lowerCamelCase_ = { "type": "section", "text": { "type": "mrkdwn", "text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
239
1
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
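A hedged usage sketch that drives the converter programmatically via main(raw_args); the paths are illustrative, and the conversion relies on TF1-style APIs (tf.Session, tf.get_variable):

main([
    "--model_name", "bert-base-uncased",
    "--pytorch_model_path", "./pytorch_model.bin",   # illustrative path
    "--tf_cache_dir", "./tf_checkpoint",             # illustrative path
])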
7
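The checkpoint-conversion script above rests on two mechanics: rewriting PyTorch parameter names into TensorFlow variable names via an ordered list of (pattern, replacement) pairs, and transposing the weights whose layout differs between frameworks. A self-contained sketch of just that logic (the sample name below is illustrative):

import numpy as np

VAR_MAP = (("layer.", "layer_"), (".", "/"), ("weight", "kernel"))
TRANSPOSE_MARKERS = ("dense.weight", "attention.self.query")

def to_tf_var_name(name: str) -> str:
    # Order matters: "layer." must be rewritten before "." becomes "/".
    for pattern, replacement in VAR_MAP:
        name = name.replace(pattern, replacement)
    return f"bert/{name}"

def convert_tensor(name: str, tensor: np.ndarray) -> np.ndarray:
    # PyTorch linear weights are (out, in); TF kernels are (in, out).
    return tensor.T if any(marker in name for marker in TRANSPOSE_MARKERS) else tensor

print(to_tf_var_name("encoder.layer.0.output.dense.weight"))
# bert/encoder/layer_0/output/dense/kernel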
def dodecahedron_surface_area(edge: float) -> float: '''simple docstring''' if edge <= 0 or not isinstance(edge, (int, float)): raise ValueError('''Length must be positive.''') return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def dodecahedron_volume(edge: float) -> float: '''simple docstring''' if edge <= 0 or not isinstance(edge, (int, float)): raise ValueError('''Length must be positive.''') return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
101
0
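Spot-checking the two dodecahedron helpers above at edge = 1 against the known values for the unit regular dodecahedron (surface area about 20.6457, volume about 7.6631):

print(round(dodecahedron_surface_area(1), 4))  # 20.6457
print(round(dodecahedron_volume(1), 4))        # 7.6631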
from math import ceil, sqrt def solution(limit: int = 1000000) -> int: """simple docstring""" answer = 0 for outer_width in range(3, (limit // 4) + 2): if outer_width**2 > limit: hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1) else: hole_width_lower_bound = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F'''{solution() = }''')
353
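The closed-form `solution` above (Project Euler 173: square laminae built from at most `limit` tiles) can be sanity-checked by direct enumeration for small limits; this sketch assumes the same problem statement:

def count_laminae_brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina (hole = outer - 2) still fits
        for hole in range(outer - 2, 0, -2):  # hole side keeps outer's parity
            if outer * outer - hole * hole <= limit:
                count += 1
            else:
                break  # smaller holes only use more tiles
        outer += 1
    return count

print(count_laminae_brute_force(100))  # 41, matching the problem statement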
from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Any = logging.get_logger(__name__) _A : int = { 'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : Dict = "timesformer" def __init__( self : List[str] , A : int=2_2_4 , A : Optional[Any]=1_6 , A : str=3 , A : str=8 , A : Any=7_6_8 , A : Dict=1_2 , A : Optional[Any]=1_2 , A : Any=3_0_7_2 , A : str="gelu" , A : Optional[int]=0.0 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : int=1e-6 , A : Tuple=True , A : Any="divided_space_time" , A : Optional[Any]=0 , **A : Tuple , ) ->str: super().__init__(**A ) lowerCamelCase__ : Optional[Any] = image_size lowerCamelCase__ : int = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Optional[int] = num_frames lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob lowerCamelCase__ : Dict = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : str = layer_norm_eps lowerCamelCase__ : Union[str, Any] = qkv_bias lowerCamelCase__ : str = attention_type lowerCamelCase__ : List[str] = drop_path_rate
265
0
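The TimeSformer config above follows the standard PretrainedConfig recipe: declare a model type, accept hyperparameters with defaults, store them as attributes, and forward the rest to the base class. A stripped-down sketch of the same pattern (class name and fields invented for illustration):

from transformers import PretrainedConfig

class TinyVideoConfig(PretrainedConfig):  # hypothetical example class
    model_type = "tiny_video"

    def __init__(self, image_size=224, patch_size=16, num_frames=8, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_frames = num_frames

cfg = TinyVideoConfig(num_frames=16)
print(cfg.num_frames)  # 16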
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Dict = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' A : Union[str, Any] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' A : Dict = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=4 , _snake_case=False ) -> Any: '''simple docstring''' __a = compute_bleu( reference_corpus=_snake_case , translation_corpus=_snake_case , max_order=_snake_case , smooth=_snake_case ) ((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
6
"""simple docstring""" from typing import Any def __lowerCamelCase ( a_ : list ) -> list[Any]: if not input_list: return [] __SCREAMING_SNAKE_CASE :int = [input_list.count(a_ ) for value in input_list] __SCREAMING_SNAKE_CASE :str = max(a_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(a_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
191
0
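The `mode` helper above behaves like `statistics.multimode` with sorted output; a quick equivalence check:

from statistics import multimode

data = [2, 2, 3, 3, 4]
print(sorted(multimode(data)))  # [2, 3]; both values share the highest count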
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A=None , __A=None , __A=0 ) -> List[str]: lowerCAmelCase_ :Tuple = 1.0 if scale is None else scale lowerCAmelCase_ :str = 0.0 if loc is None else loc super().__init__(__A , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__A )] ) @property def __lowerCAmelCase ( self ) -> Tuple: return self.base_dist.mean * self.scale + self.loc @property def __lowerCAmelCase ( self ) -> Tuple: return self.base_dist.variance * self.scale**2 @property def __lowerCAmelCase ( self ) -> Tuple: return self.variance.sqrt() class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , __A , __A , __A , **__A ) -> None: super().__init__(**__A ) lowerCAmelCase_ :Any = args_dim lowerCAmelCase_ :Dict = nn.ModuleList([nn.Linear(__A , __A ) for dim in args_dim.values()] ) lowerCAmelCase_ :Any = domain_map def __lowerCAmelCase ( self , __A ) -> Tuple[torch.Tensor]: lowerCAmelCase_ :str = [proj(__A ) for proj in self.proj] return self.domain_map(*__A ) class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , __A ) -> List[Any]: super().__init__() lowerCAmelCase_ :str = function def __lowerCAmelCase ( self , __A , *__A ) -> Optional[Any]: return self.function(__A , *__A ) class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :type UpperCAmelCase_ :int UpperCAmelCase_ :Dict[str, int] def __init__( self , __A = 1 ) -> None: lowerCAmelCase_ :List[str] = dim lowerCAmelCase_ :Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim} def __lowerCAmelCase ( self , __A ) -> List[str]: if self.dim == 1: return self.distribution_class(*__A ) else: return Independent(self.distribution_class(*__A ) , 1 ) def __lowerCAmelCase ( self , __A , __A = None , __A = None , ) -> Distribution: lowerCAmelCase_ :Optional[Any] = self._base_distribution(__A ) if loc is None and scale is None: return distr else: return AffineTransformed(__A , loc=__A , scale=__A , event_dim=self.event_dim ) @property def __lowerCAmelCase ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def __lowerCAmelCase ( self ) -> int: return len(self.event_shape ) @property def __lowerCAmelCase ( self ) -> float: return 0.0 def __lowerCAmelCase ( self , __A ) -> nn.Module: return ParameterProjection( in_features=__A , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __lowerCAmelCase ( self , *__A ) -> Dict: raise NotImplementedError() @staticmethod def __lowerCAmelCase ( __A ) -> torch.Tensor: return (x + torch.sqrt(torch.square(__A ) + 4.0 )) / 2.0 class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} UpperCAmelCase_ :type = StudentT @classmethod def __lowerCAmelCase ( cls , __A , __A , __A ) -> Dict: lowerCAmelCase_ :List[str] = cls.squareplus(__A ).clamp_min(torch.finfo(scale.dtype ).eps ) lowerCAmelCase_ :Dict = 2.0 + cls.squareplus(__A ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict[str, int] = {"loc": 1, "scale": 1} UpperCAmelCase_ :type = Normal @classmethod def __lowerCAmelCase ( cls , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = cls.squareplus(__A ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _SCREAMING_SNAKE_CASE ( A__ ): 
UpperCAmelCase_ :Dict[str, int] = {"total_count": 1, "logits": 1} UpperCAmelCase_ :type = NegativeBinomial @classmethod def __lowerCAmelCase ( cls , __A , __A ) -> Optional[Any]: lowerCAmelCase_ :List[str] = cls.squareplus(__A ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __lowerCAmelCase ( self , __A ) -> Distribution: lowerCAmelCase_ :Optional[int] = distr_args if self.dim == 1: return self.distribution_class(total_count=__A , logits=__A ) else: return Independent(self.distribution_class(total_count=__A , logits=__A ) , 1 ) def __lowerCAmelCase ( self , __A , __A = None , __A = None ) -> Distribution: lowerCAmelCase_ :Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
359
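The squareplus map used by the distribution heads above, (x + sqrt(x^2 + 4)) / 2, is a smooth, strictly positive transform (an alternative to softplus) for producing scale parameters. A small numeric check:

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

x = torch.tensor([-5.0, 0.0, 5.0])
print(squareplus(x))  # tensor([0.1926, 1.0000, 5.1926]); always > 0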
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
1
0
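The deprecation shim above is a common pattern: subclass the replacement, emit a FutureWarning in __init__, and delegate everything else. A generic sketch with invented class names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):  # deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

print(OldFeatureExtractor().size)  # 224, after emitting a FutureWarning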
from functools import reduce N = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def solution(n: str = N) -> int: # mypy cannot properly interpret reduce return max( int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 1_3])) for i in range(len(n) - 1_2) ) if __name__ == "__main__": print(f"""{solution() = }""")
240
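The reduce-based scan in `solution` above finds the largest product of 13 adjacent digits; the same sliding window reads more easily with math.prod, checked here on a tiny input (the function name is mine):

from math import prod

def largest_product(digits: str, window: int = 13) -> int:
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )

print(largest_product("123456789", window=3))  # 504 = 7 * 8 * 9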
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case : List[str] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=None ): require_version(deps[pkg] , __lowerCAmelCase )
240
1
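The runtime dependency check above funnels into require_version, which parses a pip-style specifier and raises if the installed package does not satisfy it; the optional second argument is a hint appended to the error message. Assuming transformers is installed:

from transformers.utils.versions import require_version

require_version("numpy>=1.17", "numpy is required for tensor conversions")  # no-op when satisfied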
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__) # pylint: disable=invalid-name __SCREAMING_SNAKE_CASE :List[str] = ''' Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> repo = "openai/shap-e-img2img" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png" >>> image = load_image(image_url).convert("RGB") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], "corgi_3d.gif") ``` ''' @dataclass class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Union[PIL.Image.Image, np.ndarray] class A_ ( lowerCAmelCase_ ): def __init__( self : Any , snake_case_ : PriorTransformer , snake_case_ : CLIPVisionModel , snake_case_ : CLIPImageProcessor , snake_case_ : HeunDiscreteScheduler , snake_case_ : ShapERenderer , ): super().__init__() self.register_modules( prior=snake_case_ , image_encoder=snake_case_ , image_processor=snake_case_ , scheduler=snake_case_ , renderer=snake_case_ , ) def lowercase ( self : List[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ): if latents is None: _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) _UpperCAmelCase = latents.to(snake_case_ ) _UpperCAmelCase = latents * scheduler.init_noise_sigma return latents def lowercase ( self : Optional[Any] , snake_case_ : Union[str, Any]=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) _UpperCAmelCase = torch.device(f'cuda:{gpu_id}' ) _UpperCAmelCase = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case_ , snake_case_ ) @property def lowercase ( self : List[Any] ): if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(snake_case_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[str] , ): if isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , torch.Tensor ): _UpperCAmelCase = torch.cat(snake_case_ , axis=0 ) if image[0].ndim == 4 else 
torch.stack(snake_case_ , axis=0 ) if not isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = self.image_processor(snake_case_ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 ) _UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=snake_case_ ) _UpperCAmelCase = self.image_encoder(snake_case_ )["last_hidden_state"] _UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _UpperCAmelCase = image_embeds.repeat_interleave(snake_case_ , dim=0 ) if do_classifier_free_guidance: _UpperCAmelCase = torch.zeros_like(snake_case_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(snake_case_ ) def __call__( self : str , snake_case_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , snake_case_ : int = 1 , snake_case_ : int = 2_5 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[torch.FloatTensor] = None , snake_case_ : float = 4.0 , snake_case_ : int = 6_4 , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] elif isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _UpperCAmelCase = len(snake_case_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(snake_case_ )}' ) _UpperCAmelCase = self._execution_device _UpperCAmelCase = batch_size * num_images_per_prompt _UpperCAmelCase = guidance_scale > 1.0 _UpperCAmelCase = self._encode_image(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # prior self.scheduler.set_timesteps(snake_case_ , device=snake_case_ ) _UpperCAmelCase = self.scheduler.timesteps _UpperCAmelCase = self.prior.config.num_embeddings _UpperCAmelCase = self.prior.config.embedding_dim _UpperCAmelCase = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _UpperCAmelCase = latents.reshape(latents.shape[0] , snake_case_ , snake_case_ ) for i, t in enumerate(self.progress_bar(snake_case_ ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) _UpperCAmelCase = self.prior( snake_case_ , timestep=snake_case_ , proj_embedding=snake_case_ , ).predicted_image_embedding # remove the variance _UpperCAmelCase , _UpperCAmelCase = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 ) _UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _UpperCAmelCase = self.scheduler.step( snake_case_ , timestep=snake_case_ , sample=snake_case_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=snake_case_ ) _UpperCAmelCase = [] for i, latent 
in enumerate(snake_case_ ): print() _UpperCAmelCase = self.renderer.decode( latent[None, :] , snake_case_ , size=snake_case_ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(snake_case_ ) _UpperCAmelCase = torch.stack(snake_case_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) _UpperCAmelCase = images.cpu().numpy() if output_type == "pil": _UpperCAmelCase = [self.numpy_to_pil(snake_case_ ) for image in images] # Offload last model to CPU if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=snake_case_ )
368
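Inside the denoising loop above, guidance uses the standard classifier-free-guidance combination; isolated, the update is just:

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds [unconditional, conditional] predictions stacked together.
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)

pred = torch.randn(2, 4)           # toy batch: one uncond + one cond sample
print(apply_cfg(pred, 3.0).shape)  # torch.Size([1, 4])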
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING __SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase_ ) class A_ ( lowerCAmelCase_ ): def __init__( self : List[str] , *snake_case_ : Dict , **snake_case_ : Dict ): super().__init__(*snake_case_ , **snake_case_ ) requires_backends(self , "vision" ) self.check_model_type(snake_case_ ) def __call__( self : Optional[Any] , snake_case_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case_ : Optional[int] ): return super().__call__(snake_case_ , **snake_case_ ) def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ): return {}, {}, {} def lowercase ( self : Dict , snake_case_ : Optional[int] ): _UpperCAmelCase = load_image(snake_case_ ) _UpperCAmelCase = image.size _UpperCAmelCase = self.image_processor(images=snake_case_ , return_tensors=self.framework ) return model_inputs def lowercase ( self : Optional[int] , snake_case_ : List[Any] ): _UpperCAmelCase = self.model(**snake_case_ ) return model_outputs def lowercase ( self : List[str] , snake_case_ : Dict ): _UpperCAmelCase = model_outputs.predicted_depth _UpperCAmelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=snake_case_ ) _UpperCAmelCase = prediction.squeeze().cpu().numpy() _UpperCAmelCase = (output * 2_5_5 / np.max(snake_case_ )).astype("uint8" ) _UpperCAmelCase = Image.fromarray(snake_case_ ) _UpperCAmelCase = {} _UpperCAmelCase = predicted_depth _UpperCAmelCase = depth return output_dict
156
0
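The class above implements the transformers depth-estimation pipeline; end users normally reach it through the pipeline factory. Typical usage looks roughly like this (the checkpoint and image path are illustrative, not taken from the snippet):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("path/to/image.jpg")
print(result["depth"])                  # PIL image of the predicted depth map
print(result["predicted_depth"].shape)  # raw depth tensor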
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(A__ ): requests.request("GET" ,"https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" ,"https://huggingface.co" ,timeout=1.0 ) @pytest.mark.integration def snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" ,"https://huggingface.co" ) def snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(A__ ): http_head("https://huggingface.co" )
268
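The offline tests above lean on a project-internal `offline` context manager; a homemade equivalent can be built with unittest.mock (the `no_network` helper below is my own, not from the test suite):

from unittest import mock

import pytest
import requests

def no_network():
    return mock.patch(
        "requests.Session.request",
        side_effect=requests.exceptions.ConnectionError("offline"),
    )

def test_request_fails_offline():
    with no_network():
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.get("https://huggingface.co")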
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
268
1
def merge_sort(collection: list) -> list: '''simple docstring''' def merge(left: list, right: list) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0) yield from left yield from right return list(_merge()) if len(collection) <= 1: return collection mid = len(collection) // 2 return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:])) if __name__ == "__main__": import doctest doctest.testmod() user_input = input('''Enter numbers separated by a comma:\n''').strip() unsorted = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
371
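Usage of the generator-based merge_sort above:

print(merge_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]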
from __future__ import annotations def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]: '''simple docstring''' if (voltage, current, resistance).count(0) != 1: raise ValueError("""One and only one argument must be 0""") if resistance < 0: raise ValueError("""Resistance cannot be negative""") if voltage == 0: return {"voltage": float(current * resistance)} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("""Exactly one argument must be 0""") if __name__ == "__main__": import doctest doctest.testmod()
34
0
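Usage of the ohms_law helper above: exactly one of the three quantities is passed as 0 and solved from V = I * R.

print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
print(ohms_law(voltage=0, current=2, resistance=4))   # {'voltage': 8.0}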
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class lowerCAmelCase__ ( unittest.TestCase ): def lowerCAmelCase__ ( self : Any ) ->Tuple: '''simple docstring''' _UpperCAmelCase : List[str] = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) ) _UpperCAmelCase : Optional[int] = self.diffusers_dir shutil.copy( os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , ) def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[str] = "src/diffusers" shutil.rmtree(self.diffusers_dir ) def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ) ->List[str]: '''simple docstring''' _UpperCAmelCase : List[str] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCAmelCase : List[Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCAmelCase : str = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _UpperCAmelCase : int = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ ) _UpperCAmelCase : Tuple = os.path.join(self.diffusers_dir , "new_code.py" ) with open(lowerCamelCase__ , "w" , newline="\n" ) as f: f.write(lowerCamelCase__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ ) with open(lowerCamelCase__ , "r" ) as f: self.assertTrue(f.read() , lowerCamelCase__ ) def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Any = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def lowerCAmelCase__ ( self : List[Any] ) ->Dict: '''simple docstring''' self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , ) # Copy consistency with a really long name _UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
234
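The test above exercises a copy-consistency checker whose core transformation is applying Old->New renames with a plain re.sub before comparing code blocks; the mechanism in isolation:

import re

reference = "class DDPMSchedulerOutput:\n    prev_sample: 'torch.FloatTensor'"
observed = re.sub("DDPM", "Test", reference)
print(observed.splitlines()[0])  # class TestSchedulerOutput: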
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) lowerCamelCase__ = logging.getLogger() lowerCamelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCAmelCase__ ( UpperCAmelCase__ ): def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] ) ->Tuple: '''simple docstring''' os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) _UpperCAmelCase : List[Any] = {"source": "What is love ?", "target": "life"} _UpperCAmelCase : Any = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _UpperCAmelCase : Dict = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(lowerCamelCase__ , F"""{split}.{field}""" ) , "w" ) as f: f.write(lowerCamelCase__ ) def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : str = "pytorch" ) ->Any: '''simple docstring''' _UpperCAmelCase : Any = self.get_auto_remove_tmp_dir() _UpperCAmelCase : int = os.path.join(lowerCamelCase__ , "output" ) _UpperCAmelCase : Tuple = os.path.join(lowerCamelCase__ , "data" ) self._create_dummy_data(data_dir=lowerCamelCase__ ) _UpperCAmelCase : str = F""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(F"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) _UpperCAmelCase : str = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(lowerCamelCase__ , env=self.get_env() ) _UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase__ , "metrics.json" ) with open(lowerCamelCase__ ) as f: _UpperCAmelCase : Dict = json.load(lowerCamelCase__ ) return result @require_torch_gpu def lowerCAmelCase__ ( self : Dict ) ->Dict: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def lowerCAmelCase__ ( self : List[Any] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : List[str] = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def lowerCAmelCase__ ( self : int ) ->str: '''simple docstring''' _UpperCAmelCase : Any = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def lowerCAmelCase__ ( self : int ) ->Any: '''simple 
docstring''' _UpperCAmelCase : str = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
234
1
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets _lowercase : Optional[Any] ="\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n" _lowercase : int ="\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n" _lowercase : int ="\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n" def lowerCAmelCase_ ( _lowercase : int , _lowercase : Dict) -> Tuple: """simple docstring""" return float((preds == labels).mean()) def lowerCAmelCase_ ( _lowercase : List[str] , _lowercase : Optional[Any]) -> Union[str, Any]: """simple docstring""" a__ : Any = simple_accuracy(_lowercase , _lowercase) a__ : int = float(fa_score(y_true=_lowercase , y_pred=_lowercase)) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase_ ( _lowercase : List[Any] , _lowercase : str) -> int: """simple docstring""" a__ : Dict = np.array(_lowercase) a__ : int = np.array(_lowercase) a__ : Optional[int] = en_sentvecs.shape[0] # mean centering a__ : List[str] = en_sentvecs - np.mean(_lowercase , axis=0) a__ : Tuple = in_sentvecs - np.mean(_lowercase , axis=0) a__ : Any = cdist(_lowercase , _lowercase , """cosine""") a__ : Optional[int] = np.array(range(_lowercase)) a__ : Union[str, Any] = sim.argsort(axis=1)[:, :10] a__ : Any = np.any(preds == actual[:, None] , axis=1) return float(matches.mean()) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", 
"inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" ) if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""" ) ), """references""": datasets.Value("""int64""" ) if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Optional[Any]: """simple docstring""" if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__lowercase , __lowercase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__lowercase , __lowercase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""" )
266
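The accuracy and F1 helpers above, restated with readable names and checked on a toy prediction set (the snippet's fa_score is sklearn's f1_score with digits obfuscated):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
accuracy = float((preds == labels).mean())
f1 = float(f1_score(y_true=labels, y_pred=preds))
print(accuracy, f1)  # 0.75 0.6666666666666666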
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCAmelCase_ ( _lowercase : Union[str, Any] , _lowercase : Dict=False) -> Any: """simple docstring""" try: a__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. a__ : Optional[int] = default else: # KEY is set, convert it to True or False. try: a__ : Optional[int] = strtobool(_lowercase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''') return _value _lowercase : Dict =parse_flag_from_env("RUN_SLOW", default=False) def lowerCAmelCase_ ( _lowercase : Any) -> str: """simple docstring""" return unittest.skip("""Test was skipped""")(_lowercase) def lowerCAmelCase_ ( _lowercase : str) -> List[str]: """simple docstring""" return unittest.skipUnless(_run_slow_tests , """test is slow""")(_lowercase) def lowerCAmelCase_ ( _lowercase : List[Any]) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : List[Any]) -> List[Any]: """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Optional[int]) -> Dict: """simple docstring""" return unittest.skipUnless(is_xpu_available() , """test requires a XPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Tuple) -> List[str]: """simple docstring""" return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Dict) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Tuple) -> Any: """simple docstring""" return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Union[str, Any]) -> List[str]: """simple docstring""" return unittest.skipUnless(is_tpu_available() , """test requires TPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : str) -> int: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Any) -> List[str]: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Union[str, Any]) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""")(_lowercase) def lowerCAmelCase_ ( _lowercase : int) -> Tuple: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""")(_lowercase) def lowerCAmelCase_ ( _lowercase : int) -> Optional[Any]: """simple docstring""" 
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Optional[int]) -> List[str]: """simple docstring""" return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Union[str, Any]) -> Optional[Any]: """simple docstring""" return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""") , """test requires torch version >= 1.12.0""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Any=None , _lowercase : List[str]=None) -> Dict: """simple docstring""" if test_case is None: return partial(_lowercase , version=_lowercase) return unittest.skipUnless(is_torch_version(""">=""" , _lowercase) , F'''test requires torch version >= {version}''')(_lowercase) def lowerCAmelCase_ ( _lowercase : Any) -> int: """simple docstring""" return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""")(_lowercase) def lowerCAmelCase_ ( _lowercase : str) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(is_wandb_available() , """test requires wandb""")(_lowercase) def lowerCAmelCase_ ( _lowercase : Optional[int]) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""")(_lowercase) _lowercase : List[str] =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCAmelCase_ ( _lowercase : Optional[int]) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(_lowercase) class snake_case__ (unittest.TestCase ): """simple docstring""" __lowerCAmelCase :Optional[Any] = True @classmethod def SCREAMING_SNAKE_CASE__( cls ) -> Optional[int]: """simple docstring""" a__ : Tuple = tempfile.mkdtemp() @classmethod def SCREAMING_SNAKE_CASE__( cls ) -> Dict: """simple docstring""" if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir ).glob("""**/*""" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__lowercase ) class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]: """simple docstring""" a__ : Tuple = mocks if isinstance(__lowercase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCAmelCase_ ( _lowercase : Optional[int]) -> List[Any]: """simple docstring""" a__ : Tuple = AcceleratorState() a__ : List[str] = tensor[None].clone().to(state.device) a__ : Any = gather(_lowercase).cpu() a__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _lowercase): return False return True class snake_case__ : """simple docstring""" def __init__( self , __lowercase , __lowercase , __lowercase ) -> Any: """simple docstring""" a__ : Any = returncode a__ : List[Any] = stdout a__ : Any = stderr async def lowerCAmelCase_ ( _lowercase : str , _lowercase : str) -> List[Any]: """simple docstring""" while True: a__ : str = await stream.readline() if line: callback(_lowercase) else: break async def lowerCAmelCase_ ( _lowercase : Any , _lowercase : Union[str, Any]=None , _lowercase : List[str]=None , _lowercase : Tuple=None , _lowercase : Optional[Any]=False , _lowercase : Dict=False) -> _RunOutput: """simple docstring""" if echo: print("""\nRunning: """ , """ """.join(_lowercase)) a__ : int = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) a__ : int = [] a__ : Optional[int] = [] def tee(_lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Optional[Any]=""): a__ : int = line.decode("""utf-8""").rstrip() sink.append(_lowercase) if not quiet: print(_lowercase , _lowercase , file=_lowercase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowercase: tee(_lowercase , _lowercase , sys.stdout , label="""stdout:"""))), asyncio.create_task(_read_stream(p.stderr , lambda _lowercase: tee(_lowercase , _lowercase , sys.stderr , label="""stderr:"""))), ] , timeout=_lowercase , ) return _RunOutput(await p.wait() , _lowercase , _lowercase) def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Tuple=None , _lowercase : Any=180 , _lowercase : List[Any]=False , _lowercase : Dict=True) -> _RunOutput: """simple docstring""" a__ : Any = asyncio.get_event_loop() a__ : List[Any] = loop.run_until_complete( _stream_subprocess(_lowercase , env=_lowercase , stdin=_lowercase , timeout=_lowercase , quiet=_lowercase , echo=_lowercase)) a__ : Optional[int] = """ """.join(_lowercase) if result.returncode > 0: a__ : List[Any] = """\n""".join(result.stderr) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case__ (A__ ): """simple docstring""" pass def lowerCAmelCase_ ( _lowercase : List[str] , _lowercase : Optional[int]=False) -> Dict: """simple docstring""" try: a__ : List[Any] = subprocess.check_output(_lowercase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_lowercase , """decode"""): a__ : Tuple = output.decode("""utf-8""") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F'''Command `{' '.join(_lowercase)}` failed with the following error:\n\n{e.output.decode()}''') from e
266
1
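The row above flattens accelerate's testing utilities, whose core pattern is gating slow tests behind a RUN_SLOW environment flag. Below is a minimal, self-contained sketch of that pattern; `parse_flag_from_env` and the RUN_SLOW flag mirror names that appear in the row, while `ExampleTests` is invented purely for illustration.

import os
import unittest
from distutils.util import strtobool


def parse_flag_from_env(key, default=False):
    # Read a yes/no flag such as RUN_SLOW=yes from the environment.
    value = os.environ.get(key)
    if value is None:
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    # Skip the decorated test unless RUN_SLOW is set to a truthy value.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


class ExampleTests(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertTrue(True)

Running RUN_SLOW=yes python -m unittest then includes the decorated test; without the flag it is reported as skipped.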
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def _lowerCamelCase( a ): __a = {} __a = tokenizer(example["content"] , truncation=a )["input_ids"] __a = len(example["content"] ) / len(output["input_ids"] ) return output SCREAMING_SNAKE_CASE__:Optional[int] = HfArgumentParser(PretokenizationArguments) SCREAMING_SNAKE_CASE__:str = parser.parse_args() if args.num_workers is None: SCREAMING_SNAKE_CASE__:Any = multiprocessing.cpu_count() SCREAMING_SNAKE_CASE__:int = AutoTokenizer.from_pretrained(args.tokenizer_dir) SCREAMING_SNAKE_CASE__:int = time.time() SCREAMING_SNAKE_CASE__:str = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') SCREAMING_SNAKE_CASE__:int = time.time() SCREAMING_SNAKE_CASE__:List[Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') SCREAMING_SNAKE_CASE__:Tuple = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
261
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
261
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class snake_case ( __snake_case ): SCREAMING_SNAKE_CASE_ : Any = """ctrl""" SCREAMING_SNAKE_CASE_ : List[Any] = ["""past_key_values"""] SCREAMING_SNAKE_CASE_ : Optional[Any] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , UpperCamelCase__ : Any=2_4_6_5_3_4 , UpperCamelCase__ : int=2_5_6 , UpperCamelCase__ : Optional[int]=1_2_8_0 , UpperCamelCase__ : int=8_1_9_2 , UpperCamelCase__ : Any=4_8 , UpperCamelCase__ : int=1_6 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=1e-6 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Tuple=True , **UpperCamelCase__ : Optional[Any] , )-> str: '''simple docstring''' __lowerCAmelCase: Optional[Any] = vocab_size __lowerCAmelCase: int = n_positions __lowerCAmelCase: int = n_embd __lowerCAmelCase: Optional[Any] = n_layer __lowerCAmelCase: List[str] = n_head __lowerCAmelCase: List[Any] = dff __lowerCAmelCase: str = resid_pdrop __lowerCAmelCase: Optional[int] = embd_pdrop __lowerCAmelCase: Tuple = layer_norm_epsilon __lowerCAmelCase: Optional[Any] = initializer_range __lowerCAmelCase: Union[str, Any] = use_cache super().__init__(**UpperCamelCase__)
217
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } __A = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class snake_case ( __snake_case ): SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : Any = RobertaTokenizer def __init__( self : Optional[int] , UpperCamelCase__ : int=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int="replace" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : int="<mask>" , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[Any]=True , **UpperCamelCase__ : Tuple , )-> Optional[int]: '''simple docstring''' super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , 
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , ) __lowerCAmelCase: Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space: __lowerCAmelCase: str = getattr(UpperCamelCase__ , pre_tok_state.pop("type")) __lowerCAmelCase: Optional[int] = add_prefix_space __lowerCAmelCase: Dict = pre_tok_class(**UpperCamelCase__) __lowerCAmelCase: Any = add_prefix_space __lowerCAmelCase: int = "post_processor" __lowerCAmelCase: Optional[Any] = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__) if tokenizer_component_instance: __lowerCAmelCase: Dict = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __lowerCAmelCase: List[Any] = tuple(state["sep"]) if "cls" in state: __lowerCAmelCase: str = tuple(state["cls"]) __lowerCAmelCase: str = False if state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space: __lowerCAmelCase: Optional[Any] = add_prefix_space __lowerCAmelCase: List[str] = True if state.get("trim_offsets" , UpperCamelCase__) != trim_offsets: __lowerCAmelCase: Any = trim_offsets __lowerCAmelCase: List[str] = True if changes_to_apply: __lowerCAmelCase: str = getattr(UpperCamelCase__ , state.pop("type")) __lowerCAmelCase: List[str] = component_class(**UpperCamelCase__) setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__) @property def lowercase_ ( self : List[str])-> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet.") return None return str(self._mask_token) @mask_token.setter def lowercase_ ( self : Tuple , UpperCamelCase__ : Optional[int])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else value __lowerCAmelCase: int = value def lowercase_ ( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any])-> BatchEncoding: '''simple docstring''' __lowerCAmelCase: List[Any] = kwargs.get("is_split_into_words" , UpperCamelCase__) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__) def lowercase_ ( self : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str])-> BatchEncoding: '''simple docstring''' __lowerCAmelCase: Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase__) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__) def lowercase_ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]: '''simple docstring''' __lowerCAmelCase: str = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__) return tuple(UpperCamelCase__) def lowercase_ ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=None)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: str = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = [self.sep_token_id] __lowerCAmelCase: str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
217
1
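For reference, the sequence-pair layout implemented by the RoBERTa tokenizer methods above (<s> A </s></s> B </s>, with all-zero token type ids) can be reproduced with plain lists. The token ids below are made up purely for illustration:

# Hypothetical ids: 0 = <s> (bos/cls), 2 = </s> (eos/sep).
bos_token_id, eos_token_id = 0, 2
token_ids_a = [31, 414, 159]
token_ids_b = [2, 71]

# Single sequence: <s> A </s>
single = [bos_token_id] + token_ids_a + [eos_token_id]

# Pair of sequences: <s> A </s></s> B </s>
pair = single + [eos_token_id] + token_ids_b + [eos_token_id]

# RoBERTa does not use token type ids, so the mask is all zeros.
token_type_ids = len(pair) * [0]

print(single)  # [0, 31, 414, 159, 2]
print(pair)    # [0, 31, 414, 159, 2, 2, 2, 71, 2]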
"""simple docstring""" import os from datetime import datetime as dt from github import Github SCREAMING_SNAKE_CASE__ = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def lowerCAmelCase__ ( ) -> Dict: """simple docstring""" snake_case = Github(os.environ['GITHUB_TOKEN'] ) snake_case = g.get_repo('huggingface/diffusers' ) snake_case = repo.get_issues(state='open' ) for issue in open_issues: snake_case = sorted(issue.get_comments() , key=lambda _UpperCamelCase : i.created_at , reverse=_UpperCamelCase ) snake_case = comments[0] if len(_UpperCamelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 2_3 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE__ = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
149
0
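The XLM __init__ above defers heavy submodule imports through transformers' _LazyModule. The toy class below sketches only the underlying idea; LazyModule here is a simplified assumption, not the real _LazyModule implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    # Toy stand-in: attribute access triggers the submodule import instead of
    # paying the import cost when the package itself is first imported.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr in self._attr_to_module:
            module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")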
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
223
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : Dict = 3 UpperCAmelCase_ : Dict = (32, 32) UpperCAmelCase_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__magic_name__ ) return image @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(__magic_name__ ) @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" def extract(*__magic_name__ : Dict , **__magic_name__ : Any ): class __a : def __init__( self : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = torch.ones([0] ) def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" self.pixel_values.to(__magic_name__ ) return self return Out() return extract def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Optional[Any] = self.dummy_cond_unet UpperCAmelCase_ : Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , 
safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : List[str] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) UpperCAmelCase_ : Optional[Any] = output.images UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__magic_name__ , )[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : str = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : str = PNDMScheduler(skip_prk_steps=__magic_name__ ) UpperCAmelCase_ : Any = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : str = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[str] = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : str = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__magic_name__ , )[0] UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1] UpperCAmelCase_ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : str = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) assert isinstance(pipe.scheduler , __magic_name__ ) 
assert pipe.safety_checker is None UpperCAmelCase_ : str = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : int = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_cond_unet UpperCAmelCase_ : Tuple = PNDMScheduler(skip_prk_steps=__magic_name__ ) UpperCAmelCase_ : Tuple = self.dummy_vae UpperCAmelCase_ : Optional[Any] = self.dummy_text_encoder UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 UpperCAmelCase_ : Union[str, Any] = unet.half() UpperCAmelCase_ : Any = vae.half() UpperCAmelCase_ : Tuple = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : List[Any] = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : str = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__magic_name__ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : Any = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Optional[int] = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) UpperCAmelCase_ : str = 40_03_66_03_46 UpperCAmelCase_ : Any = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Optional[Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 5_12, 5_12, 3) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Tuple = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" UpperCAmelCase_ : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__magic_name__ ) UpperCAmelCase_ : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = '''padme amidala taking a bath artwork, safe for work, no nudity''' UpperCAmelCase_ : Tuple = 27_34_97_17_55 UpperCAmelCase_ : List[str] = 7 UpperCAmelCase_ : Any = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : int = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[str] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 UpperCAmelCase_ : List[Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[str] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) UpperCAmelCase_ : Any = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Optional[int] = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.''' ''' leyendecker''' ) UpperCAmelCase_ : str = 10_44_35_52_34 UpperCAmelCase_ : int = 12 UpperCAmelCase_ : int = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : int = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 UpperCAmelCase_ : List[str] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : int = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
125
0
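A pattern that recurs throughout the diffusers tests above is constructing a seeded torch.Generator (falling back to the global seed on mps) so that pipeline outputs are reproducible. A minimal sketch of just that helper, with the actual pipeline call omitted:

import torch


def make_generator(device, seed=0):
    # mps does not support torch.Generator(device=...), so fall back to the
    # global manual seed there, mirroring the tests above.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


g1 = make_generator("cpu", seed=0)
g2 = make_generator("cpu", seed=0)

# Identical seeds yield identical random tensors.
assert torch.equal(torch.randn(2, 2, generator=g1), torch.randn(2, 2, generator=g2))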
"""simple docstring""" def a_ ( _lowercase ): _UpperCamelCase : Any = len(_lowercase ) _UpperCamelCase : Tuple = len(matrix[0] ) _UpperCamelCase : str = min(_lowercase , _lowercase ) for row in range(_lowercase ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _lowercase ): _UpperCamelCase : Optional[int] = matrix[col][row] / matrix[row][row] for i in range(_lowercase , _lowercase ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows _UpperCamelCase : Dict = True for i in range(row + 1 , _lowercase ): if matrix[i][row] != 0: _UpperCamelCase : Dict = matrix[i], matrix[row] _UpperCamelCase : int = False break if reduce: rank -= 1 for i in range(_lowercase ): _UpperCamelCase : Union[str, Any] = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
366
"""simple docstring""" from ...processing_utils import ProcessorMixin class _a ( _lowerCAmelCase ): UpperCamelCase = ['''image_processor''', '''feature_extractor'''] UpperCamelCase = '''TvltImageProcessor''' UpperCamelCase = '''TvltFeatureExtractor''' def __init__( self : Union[str, Any], lowerCAmelCase__ : str, lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__(image_processor=lowerCAmelCase__, feature_extractor=lowerCAmelCase__ ) _UpperCamelCase : List[str] = image_processor _UpperCamelCase : int = feature_extractor def __call__( self : List[str], lowerCAmelCase__ : Optional[int]=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Dict=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Optional[int]=False, lowerCAmelCase__ : str=False, *lowerCAmelCase__ : List[str], **lowerCAmelCase__ : Optional[int], ) -> Dict: '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) _UpperCamelCase : Optional[int] = None if images is not None: _UpperCamelCase : Optional[int] = self.image_processor(lowerCAmelCase__, mask_pixel=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ ) if images_mixed is not None: _UpperCamelCase : str = self.image_processor(lowerCAmelCase__, is_mixed=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ ) if audio is not None: _UpperCamelCase : Union[str, Any] = self.feature_extractor( lowerCAmelCase__, *lowerCAmelCase__, sampling_rate=lowerCAmelCase__, mask_audio=lowerCAmelCase__, **lowerCAmelCase__ ) _UpperCamelCase : str = {} if audio is not None: output_dict.update(lowerCAmelCase__ ) if images is not None: output_dict.update(lowerCAmelCase__ ) if images_mixed_dict is not None: output_dict.update(lowerCAmelCase__ ) return output_dict @property def snake_case ( self : List[str] ) -> Tuple: '''simple docstring''' _UpperCamelCase : List[str] = self.image_processor.model_input_names _UpperCamelCase : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
128
0
"""simple docstring""" from __future__ import annotations def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,): SCREAMING_SNAKE_CASE__ : Any = len(_snake_case ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_snake_case ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] ,[*diagonal_right_collisions, row - col] ,[*diagonal_left_collisions, row + col] ,_snake_case ,_snake_case ,) def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : list[list[str]] = [] depth_first_search([] ,[] ,[] ,_snake_case ,_snake_case ) # Print all the boards for board in boards: for column in board: print(_snake_case ) print("""""" ) print(len(_snake_case ) ,"""solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
25
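A minimal sketch of driving the backtracking search above directly, assuming `depth_first_search` is importable with the signature restored here. The 4-queens puzzle is the classic smoke test: it has exactly two solutions.

boards: list[list[str]] = []
depth_first_search([], [], [], boards, 4)  # start from an empty board
assert len(boards) == 2  # the 4-queens puzzle has exactly 2 solutions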
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
1
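The `correct_unfold_reduction_order` / `reverse_correct_unfold_reduction_order` pair above are mutually inverse column permutations of a patch-merging weight. A small round-trip check, assuming both functions and `torch` are available:

import torch

# (out_channel, in_channel) weight with in_channel divisible by 4
weight = torch.arange(16, dtype=torch.float32).reshape(2, 8)
roundtrip = reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(weight))
assert torch.equal(roundtrip, weight)  # the reverse undoes the forward reordering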
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
360
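A small usage sketch for the cycle check above, assuming the entry point is importable as `check_cycle` and graphs are adjacency lists:

acyclic = {0: [1], 1: [2], 2: []}
cyclic = {0: [1], 1: [2], 2: [0]}  # the edge 2 -> 0 closes a cycle
assert check_cycle(acyclic) is False
assert check_cycle(cyclic) is True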
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
245
0
"""simple docstring""" import sys from pathlib import Path SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) SCREAMING_SNAKE_CASE__ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} SCREAMING_SNAKE_CASE__ = 'zero2' SCREAMING_SNAKE_CASE__ = 'zero3' SCREAMING_SNAKE_CASE__ = [ZEROa, ZEROa] def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case = parameterized.to_safe_name('_'.join(str(snake_case_ ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test SCREAMING_SNAKE_CASE__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase_ ( UpperCamelCase__ ): """simple docstring""" @parameterized.expand(__a , name_func=__a ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @parameterized.expand(__a , name_func=__a ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" pass def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 10 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = True , ): """simple docstring""" snake_case = models[model] snake_case = self.run_trainer( stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , ) self.do_checks(__a ) return output_dir def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 10 , lowerCAmelCase = 1 , lowerCAmelCase = True , lowerCAmelCase = True , ): """simple docstring""" snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=__a ) snake_case = F""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__a )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 
--warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files snake_case = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() snake_case = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] snake_case = self.get_launcher(__a ) snake_case = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__a , env=self.get_env() ) return output_dir def snake_case ( self , lowerCAmelCase=False ): """simple docstring""" snake_case = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
150
'''simple docstring'''


def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
1
0
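A quick example of `binary_or` above: 25 is 0b11001 and 32 is 0b100000, so their bitwise OR is 0b111001 (decimal 57).

assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == 25 | 32  # agrees with Python's built-in operator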
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed a :Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'{bindir}/../../examples/pytorch/translation'): from run_translation import main # noqa set_seed(42) a :Dict = "sshleifer/student_marian_en_ro_6_1" a :Optional[Any] = "sshleifer/tiny-mbart" @require_torch class __a (UpperCamelCase_): '''simple docstring''' def _a ( self , _a=False , _a=None , _a=True , _a=True , _a=True , _a=True , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_a , num_train_epochs=1 , distributed=_a , extra_args_str=_a , predict_with_generate=_a , do_train=_a , do_eval=_a , do_predict=_a , ) SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(os.path.join(_a , """trainer_state.json""" ) ).log_history if not do_eval: return SCREAMING_SNAKE_CASE__ : int = [log for log in logs if """eval_loss""" in log.keys()] SCREAMING_SNAKE_CASE__ : Dict = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats SCREAMING_SNAKE_CASE__ : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , _a ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _a ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def _a ( self ) -> Optional[int]: """simple docstring""" self.run_seqaseq_quick(distributed=_a ) @require_torch_multi_gpu def _a ( self ) -> str: """simple docstring""" self.run_seqaseq_quick(distributed=_a ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def _a ( self ) -> List[str]: """simple docstring""" self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def _a ( self ) -> List[str]: """simple docstring""" self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def _a ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=_a ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def _a ( self ) -> List[Any]: """simple docstring""" self.run_seqaseq_quick( distributed=_a , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=_a ) @require_apex @require_torch_gpu def _a ( self ) -> List[str]: """simple docstring""" self.run_seqaseq_quick(distributed=_a , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False 
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def _a ( self , _a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } SCREAMING_SNAKE_CASE__ : List[str] = experiments[experiment_id] SCREAMING_SNAKE_CASE__ : List[Any] = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} SCREAMING_SNAKE_CASE__ : Optional[Any] = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**_a , extra_args_str=data["""extra_args_str"""] ) SCREAMING_SNAKE_CASE__ : Any = len(re.findall(_a , cl.err ) ) self.assertEqual(_a , data["""n_matches"""] ) @slow def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.run_trainer( eval_steps=2 , max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=10 , distributed=_a , ) # Check metrics SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(os.path.join(_a , """trainer_state.json""" ) ).log_history SCREAMING_SNAKE_CASE__ : str = [log for log in logs if """eval_loss""" in log.keys()] SCREAMING_SNAKE_CASE__ : int = eval_metrics[0] SCREAMING_SNAKE_CASE__ : Optional[int] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , _a ) # test if do_predict saves generations and metrics SCREAMING_SNAKE_CASE__ : List[Any] = os.listdir(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = {os.path.basename(_a ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _a ( self ) -> List[Any]: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(_a ) -> Tuple[int, float]: SCREAMING_SNAKE_CASE__ : Optional[Any] = """--skip_memory_metrics 0""" SCREAMING_SNAKE_CASE__ : Any = self.run_trainer( max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=1 , optim=_a , distributed=_a , extra_args_str=_a , do_eval=_a , do_predict=_a , n_gpus_to_use=1 , ) # Check metrics SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(Path(_a , """trainer_state.json""" ) ).log_history SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) SCREAMING_SNAKE_CASE__ : Tuple = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss SCREAMING_SNAKE_CASE__ : Dict = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) 
SCREAMING_SNAKE_CASE__ : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) SCREAMING_SNAKE_CASE__ : Dict = gpu_alloc_mem_orig - gpu_alloc_mem_bnb SCREAMING_SNAKE_CASE__ : Union[str, Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig SCREAMING_SNAKE_CASE__ : int = gpu_peak_mem_bnb + gpu_alloc_mem_bnb SCREAMING_SNAKE_CASE__ : List[Any] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings SCREAMING_SNAKE_CASE__ : Any = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _a , _a , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( _a , _a , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( _a , _a , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def _a ( self , _a , _a , _a , _a = 3E-3 , _a = "adafactor" , _a = False , _a = None , _a = 0 , _a = True , _a = True , _a = True , _a = True , _a = None , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE__ : Union[str, Any] = f''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(_a )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(_a )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() SCREAMING_SNAKE_CASE__ : str = f''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(_a )} '''.split() SCREAMING_SNAKE_CASE__ : int = """ --do_predict """.split() SCREAMING_SNAKE_CASE__ : List[Any] = [] if do_train: args += 
args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_gpu_count() SCREAMING_SNAKE_CASE__ : List[Any] = get_torch_dist_unique_port() SCREAMING_SNAKE_CASE__ : int = f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() SCREAMING_SNAKE_CASE__ : Optional[int] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_a , env=self.get_env() ) else: SCREAMING_SNAKE_CASE__ : Dict = ["""run_translation.py"""] + args with patch.object(_a , """argv""" , _a ): main() return output_dir
363
"""simple docstring""" def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int: return number | (1 << position) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int: return number & ~(1 << position) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int: return number ^ (1 << position) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool: return ((number >> position) & 1) == 1 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int: return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
56
0
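A few spot checks for the bit helpers above, using 13 = 0b1101:

assert set_bit(13, 1) == 15       # 0b1101 -> 0b1111
assert clear_bit(13, 2) == 9      # 0b1101 -> 0b1001
assert flip_bit(13, 0) == 12      # 0b1101 -> 0b1100
assert is_bit_set(13, 3) is True  # the highest bit of 0b1101 is set
assert get_bit(13, 1) == 0        # bit 1 of 0b1101 is 0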
'''simple docstring'''

import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
158
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , 
snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
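A tiny worked example for the Viterbi decoder above, using the classic two-state health model; for these inputs the most likely state sequence is ["healthy", "healthy", "sick"].

observations = ["normal", "cold", "dizzy"]
states = ["healthy", "sick"]
start_p = {"healthy": 0.6, "sick": 0.4}
trans_p = {
    "healthy": {"healthy": 0.7, "sick": 0.3},
    "sick": {"healthy": 0.4, "sick": 0.6},
}
emit_p = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
assert viterbi(observations, states, start_p, trans_p, emit_p) == ["healthy", "healthy", "sick"]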
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCamelCase_ : Optional[int] = logging.get_logger(__name__) lowerCamelCase_ : Dict = {"""vocab_file""": """spiece.model"""} lowerCamelCase_ : List[str] = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), } } lowerCamelCase_ : Optional[Any] = { """google/bigbird-roberta-base""": 4_096, """google/bigbird-roberta-large""": 4_096, """google/bigbird-base-trivia-itc""": 4_096, } class a__ ( __snake_case ): A__ : Tuple = VOCAB_FILES_NAMES A__ : int = PRETRAINED_VOCAB_FILES_MAP A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[Any] = ['input_ids', 'attention_mask'] A__ : List[int] = [] def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="[SEP]" , UpperCAmelCase="[MASK]" , UpperCAmelCase="[CLS]" , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , sep_token=UpperCAmelCase , mask_token=UpperCAmelCase , cls_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __a = vocab_file __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return self.sp_model.get_piece_size() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: __a = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: __a = self.__dict__.copy() __a = None return state def __setstate__( self , UpperCAmelCase ) -> Dict: __a = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[str]: return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[Any]: return self.sp_model.piece_to_id(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[str]: __a = self.sp_model.IdToPiece(UpperCAmelCase ) return token def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[int]: __a = [] __a = '' __a = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token __a = True __a = [] else: current_sub_tokens.append(UpperCAmelCase ) __a = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = True , **UpperCAmelCase , ) -> str: __a = kwargs.pop('use_source_tokenizer' , UpperCAmelCase ) __a = self.convert_ids_to_tokens(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __a = [] __a = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase ) ) __a = [] sub_texts.append(UpperCAmelCase ) else: current_sub_text.append(UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __a = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(UpperCAmelCase ) ) else: __a = ''.join(UpperCAmelCase ) __a = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __a = self.clean_up_tokenization(UpperCAmelCase ) return clean_text else: return text def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a = os.path.join( UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , 'wb' ) as fi: __a = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
359
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
197
0
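A minimal instantiation sketch for the config above, using the class name as restored here; this assumes the module is importable from within the surrounding transformers package (the relative imports preclude running it standalone).

config = FNetConfig()  # all defaults
assert config.hidden_size == 768 and config.num_hidden_layers == 12
small = FNetConfig(num_hidden_layers=4, intermediate_size=1024)  # hypothetical smaller variant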
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Count the square laminae that can be formed using up to `limit` tiles.
    A lamina with outer width n and hole width m uses n**2 - m**2 tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
230
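A sanity check for `solution` above: with a budget of 8 tiles the only square lamina is the 3x3 square with a 1x1 hole (9 - 1 = 8 tiles).

assert solution(8) == 1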
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class a ( unittest.TestCase ): __lowerCAmelCase : Any = MODEL_FOR_MASKED_LM_MAPPING __lowerCAmelCase : Optional[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING def __lowerCamelCase ( self :str ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __lowerCamelCase ( self :Any ): snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''tf''' ) snake_case__ : int = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''}, ] ,) snake_case__ : int = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser''', }, ] ,) snake_case__ : Optional[int] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] ,) @require_torch def __lowerCamelCase ( self :Optional[int] ): snake_case__ : str = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''pt''' ) snake_case__ : str = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] ,) snake_case__ : List[str] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] ,) snake_case__ : Union[str, Any] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' 
Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, ] ,) snake_case__ : Optional[int] = unmasker('''My name is <mask> <mask>''' ,top_k=2 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ [ { '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] ,) @require_torch_gpu def __lowerCamelCase ( self :int ): snake_case__ : Optional[int] = pipeline('''fill-mask''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,device=0 ,framework='''pt''' ) # convert model to fp16 pipe.model.half() snake_case__ : List[str] = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__lowercase ,__lowercase ) @slow @require_torch def __lowerCamelCase ( self :str ): snake_case__ : List[str] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''pt''' ) self.run_large_test(__lowercase ) @slow @require_tf def __lowerCamelCase ( self :Any ): snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''tf''' ) self.run_large_test(__lowercase ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ): snake_case__ : Optional[Any] = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ) ,[ {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_1_0, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''}, ] ,) snake_case__ : str = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ) ,[ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.251, '''token''': 2_2_0_1, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.214, '''token''': 1_2_7_9_0, '''token_str''': ''' Lyon''', }, ] ,) snake_case__ : Dict = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ) ,[ {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] ,) @require_torch def __lowerCamelCase ( self :List[str] ): snake_case__ : List[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''pt''' ) snake_case__ : str = None snake_case__ : int = None self.run_pipeline_test(__lowercase ,[] ) @require_tf def __lowerCamelCase ( self :int ): snake_case__ : 
Optional[int] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''tf''' ) snake_case__ : int = None snake_case__ : List[str] = None self.run_pipeline_test(__lowercase ,[] ) def __lowerCamelCase ( self :Any ,__lowercase :Any ,__lowercase :str ,__lowercase :Union[str, Any] ): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) snake_case__ : Optional[int] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : List[str] = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ): snake_case__ : List[str] = fill_masker.tokenizer snake_case__ : List[Any] = fill_masker.model snake_case__ : Dict = fill_masker( F"""This is a {tokenizer.mask_token}""" ,) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : List[str] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( __lowercase ,[ [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': 
ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], ] ,) with self.assertRaises(__lowercase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__lowercase ): fill_masker('''This is''' ) self.run_test_top_k(__lowercase ,__lowercase ) self.run_test_targets(__lowercase ,__lowercase ) self.run_test_top_k_targets(__lowercase ,__lowercase ) self.fill_mask_with_duplicate_targets_and_top_k(__lowercase ,__lowercase ) self.fill_mask_with_multiple_masks(__lowercase ,__lowercase ) def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :int ): snake_case__ : int = tokenizer.get_vocab() snake_case__ : Dict = sorted(vocab.keys() )[:2] # Pipeline argument snake_case__ : List[Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,targets=__lowercase ) snake_case__ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Optional[Any] = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} ,__lowercase ) snake_case__ : Any = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) ) # Call argument snake_case__ : str = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : str = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} ,__lowercase ) snake_case__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) ) # Score equivalence snake_case__ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) snake_case__ : Union[str, Any] = [top_mask['''token_str'''] for top_mask in outputs] snake_case__ : Tuple = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__lowercase ) == set(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) snake_case__ : int = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) # Raises with invalid with self.assertRaises(__lowercase ): snake_case__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[''''''] ) with self.assertRaises(__lowercase ): snake_case__ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets='''''' ) def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Dict ): snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,top_k=2 ) snake_case__ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) def __lowerCamelCase ( self :List[Any] ,__lowercase :Tuple ,__lowercase :str ): snake_case__ : Optional[int] = tokenizer.get_vocab() snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) # top_k=2, ntargets=3 snake_case__ : int = sorted(vocab.keys() )[:3] snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=__lowercase ) # If we use the most probably targets, and filter differently, we should still # have the same results snake_case__ : Dict = [el['''token_str'''] for el in sorted(__lowercase ,key=lambda __lowercase : x["score"] ,reverse=__lowercase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__lowercase ).issubset(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=__lowercase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Dict ,__lowercase :Dict ): snake_case__ : Union[str, Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : str = tokenizer.get_vocab() # String duplicates + id duplicates snake_case__ : int = sorted(vocab.keys() )[:3] snake_case__ : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case__ : Optional[Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" ,targets=__lowercase ,top_k=1_0 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__lowercase ) ,3 ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ): snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : Tuple = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( __lowercase ,[ [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], ] ,)
230
1
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the largest fraction below numerator/denominator
    whose denominator does not exceed `limit` (Project Euler style)."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # largest numerator with current_numerator/current_denominator < numerator/denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # keep the fraction if it is closer to the target from below
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
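# Sanity check (an illustrative addition, not part of the original file): brute-force
# the largest fraction below numerator/denominator for a tiny limit and compare.
from fractions import Fraction


def brute_force(numerator: int, denominator: int, limit: int) -> int:
    # examine every fraction n/d < numerator/denominator with d <= limit
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            if best < Fraction(n, d) < target:
                best = Fraction(n, d)
    return best.numerator


assert brute_force(3, 7, 8) == solution(numerator=3, denominator=7, limit=8) == 2  # 2/5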
370
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
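# Illustrative usage sketch (an addition, not part of the original file). It assumes the
# public "openai/clip-vit-base-patch32" checkpoint; any CLIP tokenizer checkpoint works.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    # "<cat-toy>" is registered as 3 sub-tokens: <cat-toy>_0, <cat-toy>_1, <cat-toy>_2,
    # and every occurrence in a prompt is expanded to that sequence before tokenizing.
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
    print(tokenizer.encode("a photo of <cat-toy>"))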
179
0
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
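# Illustrative usage sketch (an addition, not part of the original file); the checkpoint
# id is an assumption -- any InstructBLIP repo with a saved qformer_tokenizer subfolder works:
#
#   from transformers import InstructBlipProcessor
#   from PIL import Image
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.open("photo.jpg"), text="What is shown?", return_tensors="pt")
#   # besides pixel_values and input_ids/attention_mask, `inputs` carries the extra
#   # qformer_input_ids / qformer_attention_mask produced by the second tokenizer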
201
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the numbers below `limit` that never produce a palindrome within
    fifty reverse-and-add iterations (Lychrel numbers, Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
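# Quick illustration (an addition, not part of the original file): 47 becomes a
# palindrome after one reverse-and-add step (47 + 74 = 121), while 196 -- the classic
# Lychrel candidate -- never does within the 50-iteration budget, so it is counted.
assert is_palindrome(sum_reverse(47))
assert solution(limit=197) - solution(limit=196) == 1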
201
1
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
353
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current, and power (the third passed as 0),
    solve for the missing quantity and return it as a named tuple."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
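# Example (an addition, not part of the original file): pass 0 for exactly one of
# voltage, current, power and the missing quantity is solved for.
print(electric_power(voltage=0, current=2, power=5))    # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=1.5, power=0))  # result(name='power', value=3.0)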
160
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
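# Explanatory note (an addition, not part of the original file): with the lazy pattern
# above, importing the package only builds `_import_structure`; the torch-backed classes
# are resolved by `_LazyModule.__getattr__` on first attribute access, so e.g.
#
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#
# does not pull in torch until a modeling class is actually touched.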
252
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
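# Cross-check against hashlib (an illustrative addition, not part of the original
# file): md5_me returns the digest as 32 ASCII hex bytes, so it should match
# hashlib's hexdigest for any input.
import hashlib

for msg in (b"", b"Hello", b"The quick brown fox jumps over the lazy dog"):
    assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")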
311
0
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
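# Example invocation (an illustrative addition, not part of the original file; the
# checkpoint path and output folder are placeholders, and the script filename is an
# assumption following the usual transformers conversion-script naming):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa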
352
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
7
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = KandinskyVaaControlnetPipeline SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] SCREAMING_SNAKE_CASE = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] SCREAMING_SNAKE_CASE = False @property def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]: """simple docstring""" return 32 @property def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]: """simple docstring""" return 32 @property def _SCREAMING_SNAKE_CASE ( self: Any) -> List[str]: """simple docstring""" return self.time_input_dim @property def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[Any]: """simple docstring""" return 100 @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : Optional[Any] = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase : Dict = UNetaDConditionModel(**_UpperCAmelCase) return model @property def _SCREAMING_SNAKE_CASE ( self: str) -> Any: """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs) return model def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Tuple = self.dummy_unet __lowerCAmelCase : Dict = self.dummy_movq __lowerCAmelCase : Any = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , 
clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_UpperCAmelCase , ) __lowerCAmelCase : str = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any]=0) -> int: """simple docstring""" __lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase)).to(_UpperCAmelCase) __lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( _UpperCAmelCase) # create hint __lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase)).to(_UpperCAmelCase) if str(_UpperCAmelCase).startswith("mps"): __lowerCAmelCase : Optional[Any] = torch.manual_seed(_UpperCAmelCase) else: __lowerCAmelCase : Dict = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase) __lowerCAmelCase : int = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def _SCREAMING_SNAKE_CASE ( self: int) -> int: """simple docstring""" __lowerCAmelCase : Optional[int] = "cpu" __lowerCAmelCase : List[str] = self.get_dummy_components() __lowerCAmelCase : Optional[Any] = self.pipeline_class(**_UpperCAmelCase) __lowerCAmelCase : Union[str, Any] = pipe.to(_UpperCAmelCase) pipe.set_progress_bar_config(disable=_UpperCAmelCase) __lowerCAmelCase : str = pipe(**self.get_dummy_inputs(_UpperCAmelCase)) __lowerCAmelCase : str = output.images __lowerCAmelCase : Tuple = pipe( **self.get_dummy_inputs(_UpperCAmelCase) , return_dict=_UpperCAmelCase , )[0] __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : str = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict: """simple docstring""" __lowerCAmelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") __lowerCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") __lowerCAmelCase : Any = torch.from_numpy(np.array(_UpperCAmelCase)).float() / 255.0 __lowerCAmelCase : Optional[int] = hint.permute(2 , 0 , 1).unsqueeze(0) __lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(_UpperCAmelCase) __lowerCAmelCase : Dict = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , 
torch_dtype=torch.floataa) __lowerCAmelCase : Any = pipeline.to(_UpperCAmelCase) pipeline.set_progress_bar_config(disable=_UpperCAmelCase) __lowerCAmelCase : Dict = "A robot, 4k photo" __lowerCAmelCase : List[str] = torch.Generator(device="cuda").manual_seed(0) __lowerCAmelCase , __lowerCAmelCase : List[Any] = pipe_prior( _UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase : Any = torch.Generator(device="cuda").manual_seed(0) __lowerCAmelCase : str = pipeline( image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) __lowerCAmelCase : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase)
269
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Optional[int] = { "configuration_squeezebert": [ "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertOnnxConfig", ], "tokenization_squeezebert": ["SqueezeBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = ["SqueezeBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "SqueezeBertForMaskedLM", "SqueezeBertForMultipleChoice", "SqueezeBertForQuestionAnswering", "SqueezeBertForSequenceClassification", "SqueezeBertForTokenClassification", "SqueezeBertModel", "SqueezeBertModule", "SqueezeBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
263
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase : int = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["input_values", "attention_mask"] def __init__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 1_6000 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = False , __UpperCAmelCase = 80 , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = "hann_window" , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 80 , __UpperCAmelCase = 7600 , __UpperCAmelCase = 1E-10 , __UpperCAmelCase = 2 , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase ) __UpperCamelCase = do_normalize __UpperCamelCase = return_attention_mask __UpperCamelCase = num_mel_bins __UpperCamelCase = hop_length __UpperCamelCase = win_length __UpperCamelCase = win_function __UpperCamelCase = frame_signal_scale __UpperCamelCase = fmin __UpperCamelCase = fmax __UpperCamelCase = mel_floor __UpperCamelCase = reduction_factor __UpperCamelCase = win_length * sampling_rate // 1000 __UpperCamelCase = hop_length * sampling_rate // 1000 __UpperCamelCase = optimal_fft_length(self.sample_size ) __UpperCamelCase = (self.n_fft // 2) + 1 __UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase ) __UpperCamelCase = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , __UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , __UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 ): '''simple docstring''' if attention_mask is not None: __UpperCamelCase = np.array(__UpperCAmelCase , np.intaa ) __UpperCamelCase = [] for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ): __UpperCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: __UpperCamelCase = padding_value normed_input_values.append(__UpperCAmelCase ) else: __UpperCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def UpperCAmelCase ( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = spectrogram( __UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = 
None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: __UpperCamelCase = self._process_audio( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) else: __UpperCamelCase = None if audio_target is not None: __UpperCamelCase = self._process_audio( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) if inputs is None: return inputs_target else: __UpperCamelCase = inputs_target['input_values'] __UpperCamelCase = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: __UpperCamelCase = decoder_attention_mask return inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ): __UpperCamelCase = np.asarray(__UpperCAmelCase , dtype=np.floataa ) elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [speech] # needed to make pad() work on spectrogram inputs __UpperCamelCase = self.feature_size # convert into correct format for padding if is_target: __UpperCamelCase = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech] __UpperCamelCase = BatchFeature({'input_values': features} ) __UpperCamelCase = self.num_mel_bins else: __UpperCamelCase = BatchFeature({'input_values': speech} ) __UpperCamelCase = self.pad( __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = feature_size_hack # convert input values to correct format __UpperCamelCase = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): __UpperCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for 
array in input_values] elif ( not isinstance(__UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): __UpperCamelCase = [array.astype(np.floataa ) for array in input_values] elif isinstance(__UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): __UpperCamelCase = input_values.astype(np.floataa ) # convert attention_mask to correct format __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: __UpperCamelCase = ( attention_mask if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=__UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__UpperCAmelCase ) return padded_inputs def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = super().to_dict() # Don't serialize these as they are derived from the other properties. __UpperCamelCase = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
263
1
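The feature-extractor code above ends by calling `self.zero_mean_unit_var_norm(...)`, whose body is not shown in this row. A minimal NumPy sketch of what such a per-sequence normalization plausibly looks like (the signature and the 1e-7 variance floor are assumptions, not the extractor's confirmed API):

import numpy as np


def zero_mean_unit_var_norm(input_values, attention_mask=None, padding_value=0.0):
    # Normalize each sequence to zero mean / unit variance over its real
    # (non-padded) samples; assumed implementation, not the extractor's own.
    normalized = []
    for i, vector in enumerate(input_values):
        vector = np.asarray(vector, dtype=np.float32)
        if attention_mask is not None:
            length = int(np.asarray(attention_mask[i]).sum())  # number of real samples
            out = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
            out[length:] = padding_value  # keep the padded tail at padding_value
        else:
            out = (vector - vector.mean()) / np.sqrt(vector.var() + 1e-7)
        normalized.append(out)
    return normalized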
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
293
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __A = logging.getLogger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if os.path.exists(_SCREAMING_SNAKE_CASE ): if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) else: os.makedirs(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = 2 if unlogit: lowerCAmelCase__ :List[str] = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = p * torch.log(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 0 return -plogp.sum(dim=-1 ) def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) ) for row in range(len(_SCREAMING_SNAKE_CASE ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ :Any = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) lowerCAmelCase__ :Tuple = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) if head_mask is None: lowerCAmelCase__ :Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Any = 0.0 for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ :str = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) :Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ :str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in 
enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = entropy(attn.detach() , _SCREAMING_SNAKE_CASE ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ :Union[str, Any] = 2 lowerCAmelCase__ :Tuple = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ :str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) logger.info('Head ranked by importance scores' ) lowerCAmelCase__ :List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ :List[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ :int = head_ranks.view_as(_SCREAMING_SNAKE_CASE ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) return attn_entropy, head_importance, total_loss def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold ) lowerCAmelCase__ :Optional[int] = torch.ones_like(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ :List[str] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ :List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ :str = float('Inf' ) lowerCAmelCase__ :List[str] = head_importance.view(-1 ).sort()[1] if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads lowerCAmelCase__ :int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ :Dict = new_head_mask.view(-1 ) lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Tuple = new_head_mask.view_as(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = new_head_mask.clone().detach() print_ad_tensor(_SCREAMING_SNAKE_CASE ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) 
logger.info('Final head mask' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time lowerCAmelCase__ :List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) } for k, v in heads_to_prune.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = [ v, ] assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :int = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :int = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(_SCREAMING_SNAKE_CASE , args.output_dir ) def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' ) parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' 
) lowerCAmelCase__ :Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank ) lowerCAmelCase__ :Tuple = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ :Optional[Any] = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE ) elif args.n_gpu > 1: lowerCAmelCase__ :Union[str, Any] = nn.DataParallel(_SCREAMING_SNAKE_CASE ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Prepare dataset lowerCAmelCase__ :Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ :Union[str, Any] = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),) lowerCAmelCase__ :Optional[int] = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ :Optional[Any] = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
293
1
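Both files in the pair above share style class 293. The pruning script leans on a small entropy helper (`-(p * log p).sum()` over the last axis); a cleaned-up, runnable sketch of that helper as it reads in the mangled source:

import torch


def entropy(p, unlogit=False):
    # Entropy over the last dimension; with unlogit=True the input is squared
    # first, mirroring the helper in the pruning script above.
    if unlogit:
        p = torch.pow(p, 2)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # convention: 0 * log(0) = 0
    return -plogp.sum(dim=-1)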
a ={"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []} a =["""a""", """b""", """c""", """d""", """e"""] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: __lowerCamelCase : Optional[int] = start # add current to visited visited.append(lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __lowerCamelCase : str = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # if all neighbors visited add current to sort sort.append(lowerCamelCase__ ) # if all vertices haven't been visited select a new one to visit if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): for vertice in vertices: if vertice not in visited: __lowerCamelCase : int = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # return sort return sort if __name__ == "__main__": a =topological_sort("""a""", [], []) print(sort)
352
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
113
0
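The recursive DFS sort above can also be done iteratively. A sketch of Kahn's algorithm over the same `edges`/`vertices` representation (an alternative technique, not the row's method; the output order may differ from the DFS version):

from collections import deque


def kahn_topological_sort(vertices, edges):
    # Iterative topological sort; edges[v] lists the neighbors of v,
    # exactly as in the row's graph.
    indegree = {v: 0 for v in vertices}
    for v in vertices:
        for neighbor in edges[v]:
            indegree[neighbor] += 1
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for neighbor in edges[v]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    return order


print(kahn_topological_sort(["a", "b", "c", "d", "e"], {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}))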
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A_ : def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=1_4 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : str=True , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : int=9_9 , UpperCAmelCase : List[str]=3_2 , UpperCAmelCase : int=4 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : int="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : Union[str, Any]=0.02 , ) -> Union[str, Any]: __lowerCAmelCase: Any = parent __lowerCAmelCase: Optional[int] = batch_size __lowerCAmelCase: Any = seq_length __lowerCAmelCase: List[str] = is_training __lowerCAmelCase: Optional[int] = use_input_mask __lowerCAmelCase: Union[str, Any] = use_token_type_ids __lowerCAmelCase: Union[str, Any] = use_labels __lowerCAmelCase: str = vocab_size __lowerCAmelCase: str = hidden_size __lowerCAmelCase: List[Any] = rotary_dim __lowerCAmelCase: List[Any] = num_hidden_layers __lowerCAmelCase: Tuple = num_attention_heads __lowerCAmelCase: int = intermediate_size __lowerCAmelCase: Optional[Any] = hidden_act __lowerCAmelCase: Dict = hidden_dropout_prob __lowerCAmelCase: List[str] = attention_probs_dropout_prob __lowerCAmelCase: Optional[Any] = max_position_embeddings __lowerCAmelCase: Tuple = initializer_range __lowerCAmelCase: Optional[int] = None __lowerCAmelCase: Dict = vocab_size - 1 __lowerCAmelCase: str = vocab_size - 1 __lowerCAmelCase: List[Any] = vocab_size - 1 def UpperCAmelCase ( self : str ) -> Optional[Any]: __lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase: Optional[Any] = None if self.use_input_mask: __lowerCAmelCase: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase: List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase ( self : Dict ) -> List[Any]: __lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs() __lowerCAmelCase: Union[str, Any] = config_and_inputs __lowerCAmelCase: Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Union[str, Any]: __lowerCAmelCase: Any = 2_0 __lowerCAmelCase: 
Any = model_class_name(lowerCamelCase_ ) __lowerCAmelCase: List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) __lowerCAmelCase: Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCAmelCase: Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase: Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) __lowerCAmelCase: Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase: str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) __lowerCAmelCase: Union[str, Any] = model(lowerCamelCase_ ) __lowerCAmelCase: int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[int]: __lowerCAmelCase: List[Any] = 2_0 __lowerCAmelCase: Dict = model_class_name(lowerCamelCase_ ) __lowerCAmelCase: Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCAmelCase: str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) __lowerCAmelCase: Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase: Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) __lowerCAmelCase: Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase: Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) __lowerCAmelCase: Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) __lowerCAmelCase: List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class A_ ( lowercase_ , lowercase_ , unittest.TestCase ): _lowercase : List[str] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _lowercase : Union[str, Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase ( self : str ) -> Tuple: __lowerCAmelCase: Union[str, Any] = FlaxGPTJModelTester(self ) def UpperCAmelCase ( self : Any ) -> Dict: for model_class_name in self.all_model_classes: __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def UpperCAmelCase ( self : List[Any] ) -> List[str]: __lowerCAmelCase: List[Any] = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCAmelCase: List[Any] = tokenizer(['Hello this is a long string', 'Hey'] , 
return_tensors='np' , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) __lowerCAmelCase: Optional[Any] = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase: int = False __lowerCAmelCase: Optional[Any] = model.config.eos_token_id __lowerCAmelCase: str = jax.jit(model.generate ) __lowerCAmelCase: str = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCAmelCase: Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) __lowerCAmelCase: List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase: str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) __lowerCAmelCase: List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase: List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase: int = getattr(lowerCamelCase_ , lowerCamelCase_ ) __lowerCAmelCase: str = pt_inputs["""input_ids"""].shape __lowerCAmelCase: int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): __lowerCAmelCase: int = 0 __lowerCAmelCase: Optional[int] = 1 __lowerCAmelCase: List[Any] = 0 __lowerCAmelCase: Union[str, Any] = 1 __lowerCAmelCase: Optional[int] = pt_model_class(lowerCamelCase_ ).eval() __lowerCAmelCase: str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) __lowerCAmelCase: Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) __lowerCAmelCase: Any = fx_state with torch.no_grad(): __lowerCAmelCase: Any = pt_model(**lowerCamelCase_ ).to_tuple() __lowerCAmelCase: Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) __lowerCAmelCase: List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) __lowerCAmelCase: str = fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase: Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) __lowerCAmelCase: List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase: Dict = 
model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase: int = getattr(lowerCamelCase_ , lowerCamelCase_ ) __lowerCAmelCase: Tuple = pt_model_class(lowerCamelCase_ ).eval() __lowerCAmelCase: Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) __lowerCAmelCase: List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) __lowerCAmelCase: str = pt_inputs["""input_ids"""].shape __lowerCAmelCase: Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): __lowerCAmelCase: Union[str, Any] = 0 __lowerCAmelCase: Dict = 1 __lowerCAmelCase: Dict = 0 __lowerCAmelCase: Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCAmelCase: List[str] = pt_model(**lowerCamelCase_ ).to_tuple() __lowerCAmelCase: Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) __lowerCAmelCase: Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): __lowerCAmelCase: str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def UpperCAmelCase ( self : Optional[int] ) -> List[str]: for model_class_name in self.all_model_classes: __lowerCAmelCase: Union[str, Any] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase: Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
322
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
323
0
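The cross-framework tests above repeatedly compare outputs via `np.max(np.abs(a - b))` against a 4e-2 tolerance. A self-contained helper distilling that pattern:

import numpy as np


def assert_almost_equal(a, b, tol=4e-2):
    # Max-absolute-difference check distilled from the Flax/PyTorch
    # cross-tests above; tol mirrors their 4e-2 threshold.
    diff = np.max(np.abs(np.asarray(a) - np.asarray(b)))
    assert diff < tol, f"Max diff is {diff}, exceeds tolerance {tol}"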
"""simple docstring""" from collections.abc import Sequence def lowercase ( lowerCAmelCase__ : Sequence[float] , lowerCAmelCase__ : bool = False ) -> float: if not arr: return 0 __a = 0 if allow_empty_subarrays else float('''-inf''' ) __a = 0.0 for num in arr: __a = max(0 if allow_empty_subarrays else num , curr_sum + num ) __a = max(lowerCAmelCase__ , lowerCAmelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() lowercase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
11
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} lowercase_ = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } lowercase_ = { "facebook/mbart-large-en-ro": 1_0_2_4, "facebook/mbart-large-cc25": 1_0_2_4, } # fmt: off lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES __UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Tuple = ['input_ids', 'attention_mask'] __UpperCAmelCase : Optional[Any] = MBartTokenizer __UpperCAmelCase : List[int] = [] __UpperCAmelCase : List[int] = [] def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ): # Mask token behave like a normal word, i.e. include the space before it __a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , ) __a = vocab_file __a = False if not self.vocab_file else True __a = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __a = { lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __a = src_lang if src_lang is not None else '''en_XX''' __a = self.convert_tokens_to_ids(self._src_lang ) __a = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCAmelCase ( self ): return self._src_lang @src_lang.setter def __UpperCAmelCase ( self , _a ): __a = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCAmelCase ( self , _a , _a = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCAmelCase ( self , _a , _a = None ): __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __a = src_lang __a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a ) __a = self.convert_tokens_to_ids(_a ) __a = tgt_lang_id return inputs def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ): __a = src_lang __a = tgt_lang return super().prepare_seqaseq_batch(_a , _a , **_a ) def __UpperCAmelCase ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCAmelCase ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCAmelCase ( self , _a ): __a = self.convert_tokens_to_ids(_a ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCAmelCase ( self , _a ): __a = self.convert_tokens_to_ids(_a ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCAmelCase ( self , _a , _a = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return __a = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
11
1
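The `max_subarray_sum` row implements Kadane's algorithm for the maximum sum only. A variant that also reports the `(start, end)` slice achieving it (an illustrative companion, not part of the row):

def max_subarray_with_bounds(arr):
    # Kadane's algorithm, extended to track the half-open slice
    # arr[best_start:best_end] that attains the maximum sum.
    best_sum, best_start, best_end = float("-inf"), 0, 0
    curr_sum, curr_start = 0.0, 0
    for i, num in enumerate(arr):
        if curr_sum <= 0:
            curr_sum, curr_start = num, i  # restart the running subarray here
        else:
            curr_sum += num
        if curr_sum > best_sum:
            best_sum, best_start, best_end = curr_sum, curr_start, i + 1
    return best_sum, best_start, best_end


print(max_subarray_with_bounds([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # (6, 3, 7)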
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_asdict_keeps_deprecated_dataset_name(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
338
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." 
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." 
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
338
1
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : str = {"""vocab_file""": """vocab.txt"""} _lowerCAmelCase : str = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } _lowerCAmelCase : List[str] = { """openbmb/cpm-ant-10b""": 1_024, } def SCREAMING_SNAKE_CASE__ ( snake_case )-> str: '''simple docstring''' UpperCAmelCase__ : str = collections.OrderedDict() with open(__lowerCAmelCase , "r" , encoding="utf-8" ) as reader: UpperCAmelCase__ : List[Any] = reader.readlines() for index, token in enumerate(__lowerCAmelCase ): UpperCAmelCase__ : str = token.rstrip("\n" ) UpperCAmelCase__ : int = index return vocab class lowerCAmelCase__ ( __lowerCamelCase ): def __init__( self : str , snake_case__ : str , snake_case__ : int="<unk>" , snake_case__ : Tuple=2_0_0 ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab UpperCAmelCase__ : str = unk_token UpperCAmelCase__ : Dict = max_input_chars_per_word def __a ( self : Tuple , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(__lowercase ) if len(__lowercase ) > self.max_input_chars_per_word: return [self.unk_token] UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : List[str] = [] while start < len(__lowercase ): UpperCAmelCase__ : Any = len(__lowercase ) UpperCAmelCase__ : Any = None while start < end: UpperCAmelCase__ : Tuple = ''''''.join(chars[start:end] ) if substr in self.vocab: UpperCAmelCase__ : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__lowercase ) UpperCAmelCase__ : Union[str, Any] = end return sub_tokens class lowerCAmelCase__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ =False def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : Dict="<d>" , snake_case__ : List[Any]="</d>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : str="<pad>" , snake_case__ : Tuple="<unk>" , snake_case__ : Tuple="</n>" , snake_case__ : List[Any]="</_>" , snake_case__ : str="left" , **snake_case__ : Optional[Any] , ): '''simple docstring''' requires_backends(self , ["jieba"] ) super().__init__( bod_token=__lowercase , eod_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , unk_token=__lowercase , line_token=__lowercase , space_token=__lowercase , padding_side=__lowercase , **__lowercase , ) UpperCAmelCase__ : List[str] = bod_token UpperCAmelCase__ : List[Any] = eod_token UpperCAmelCase__ : List[Any] = load_vocab(__lowercase ) UpperCAmelCase__ : Any = self.encoder[space_token] UpperCAmelCase__ : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCAmelCase__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()} UpperCAmelCase__ : Any = WordpieceTokenizer(vocab=self.encoder , 
unk_token=self.unk_token ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.encoder[self.bod_token] @property def __a ( self : Union[str, Any] ): '''simple docstring''' return self.encoder[self.eod_token] @property def __a ( self : List[str] ): '''simple docstring''' return self.encoder["\n"] @property def __a ( self : Tuple ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for x in jieba.cut(__lowercase , cut_all=__lowercase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowercase ) ) return output_tokens def __a ( self : Optional[Any] , snake_case__ : Optional[Any] , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = [i for i in token_ids if i >= 0] UpperCAmelCase__ : Optional[int] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__lowercase , **__lowercase ) def __a ( self : int , snake_case__ : List[str] ): '''simple docstring''' return token in self.encoder def __a ( self : int , snake_case__ : List[str] ): '''simple docstring''' return "".join(__lowercase ) def __a ( self : Optional[int] , snake_case__ : Optional[int] ): '''simple docstring''' return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , snake_case__ : int ): '''simple docstring''' return self.decoder.get(__lowercase , self.unk_token ) def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if os.path.isdir(__lowercase ): UpperCAmelCase__ : int = os.path.join( __lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: UpperCAmelCase__ : str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory UpperCAmelCase__ : List[str] = 0 if " " in self.encoder: UpperCAmelCase__ : Dict = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: UpperCAmelCase__ : Union[str, Any] = self.encoder['''\n'''] del self.encoder["\n"] UpperCAmelCase__ : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(__lowercase , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' " Please check that the vocabulary is not corrupted!" ) UpperCAmelCase__ : str = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def __a ( self : Tuple , snake_case__ : List[int] , snake_case__ : List[int] = None ): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is not None: return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) return [1] + ([0] * len(__lowercase ))
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError/ValueError unless ``point`` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as ``manhattan_distance``, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
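# Quick sanity sketch (added for illustration; not part of the original sample)
# for the two Manhattan-distance variants restored above -- both should agree:
if __name__ == "__main__":
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1.5, 2], [3, 4.5]) == 4.0
    print("manhattan distance checks passed")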
76
from typing import Any class _UpperCamelCase : '''simple docstring''' def __init__( self : Dict , a : Any ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = data SCREAMING_SNAKE_CASE : int = None def __repr__( self : str ) -> str: """simple docstring""" return F"Node({self.data})" class _UpperCamelCase : '''simple docstring''' def __init__( self : List[str] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = None def __iter__( self : Any ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self : str ) -> int: """simple docstring""" return sum(1 for _ in self ) def __repr__( self : Optional[Any] ) -> str: """simple docstring""" return "->".join([str(a ) for item in self] ) def __getitem__( self : List[Any] , a : int ) -> Any: """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self : Tuple , a : int , a : Any ) -> None: """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) SCREAMING_SNAKE_CASE : str = self.head for _ in range(a ): SCREAMING_SNAKE_CASE : str = current.next SCREAMING_SNAKE_CASE : Any = data def __UpperCamelCase ( self : List[str] , a : Any ) -> None: """simple docstring""" self.insert_nth(len(self ) , a ) def __UpperCamelCase ( self : Union[str, Any] , a : Any ) -> None: """simple docstring""" self.insert_nth(0 , a ) def __UpperCamelCase ( self : Optional[Any] , a : int , a : Any ) -> None: """simple docstring""" if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) SCREAMING_SNAKE_CASE : Any = Node(a ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Optional[int] = self.head # link new_node to head SCREAMING_SNAKE_CASE : List[Any] = new_node else: SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Optional[int] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next SCREAMING_SNAKE_CASE : int = new_node def __UpperCamelCase ( self : Optional[int] ) -> None: # print every node data """simple docstring""" print(self ) def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" return self.delete_nth(0 ) def __UpperCamelCase ( self : Any ) -> Any: # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def __UpperCamelCase ( self : List[str] , a : int = 0 ) -> Any: """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) SCREAMING_SNAKE_CASE : Tuple = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next.next return delete_node.data def __UpperCamelCase ( self : List[Any] ) -> bool: """simple docstring""" return self.head is None def __UpperCamelCase ( self : Optional[int] ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : str = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Any = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : List[Any] = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : Any = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : str = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : Optional[Any] = prev def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Union[str, Any] = LinkedList() assert linked_list.is_empty() is True assert str(_a) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10): assert len(_a) == i linked_list.insert_nth(_a , i + 1) assert str(_a) == "->".join(str(_a) for i in range(1 , 11)) linked_list.insert_head(0) linked_list.insert_tail(11) assert str(_a) == "->".join(str(_a) for i in range(0 , 12)) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9) == 10 assert linked_list.delete_tail() == 11 assert len(_a) == 9 assert str(_a) == "->".join(str(_a) for i in range(1 , 10)) assert all(linked_list[i] == i + 1 for i in range(0 , 9)) is True for i in range(0 , 9): SCREAMING_SNAKE_CASE : str = -i assert all(linked_list[i] == -i for i in range(0 , 9)) is True linked_list.reverse() assert str(_a) == "->".join(str(_a) for i in range(-8 , 1)) def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Optional[Any] = [ -9, 100, Node(77345112), "dlrow olleH", 7, 5555, 0, -192.5_5555, "Hello, world!", 77.9, Node(10), None, None, 12.20, ] SCREAMING_SNAKE_CASE : List[Any] = LinkedList() for i in test_input: linked_list.insert_tail(_a) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_a) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : List[Any] = linked_list.delete_head() assert result == -9 assert ( str(_a) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Any = linked_list.delete_tail() assert result == 12.2 assert ( str(_a) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : Any = linked_list.delete_nth(10) assert result is None assert ( str(_a) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!")) assert ( str(_a) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_a) assert ( str(_a) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_a) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowerCamelCase__ ( ): from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() linked_list.insert_head(input("Inserting 1st at head ").strip()) 
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
76
1
import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __a : def __init__( self , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=None ) -> List[Any]: """simple docstring""" _UpperCAmelCase = np.random.default_rng(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = length _UpperCAmelCase = rng.normal(size=(length,) ).astype(np.floataa ) _UpperCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> List[Any]: """simple docstring""" return self.length def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class __a ( torch.nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) _UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) _UpperCAmelCase = True def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) _UpperCAmelCase = False return x * self.a[0] + self.b[0] class __a ( torch.nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" super().__init__() _UpperCAmelCase = torch.nn.Parameter(torch.tensor(_SCREAMING_SNAKE_CASE ).float() ) _UpperCAmelCase = torch.nn.Parameter(torch.tensor(_SCREAMING_SNAKE_CASE ).float() ) _UpperCAmelCase = True def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) _UpperCAmelCase = False return x * self.a + self.b def lowerCAmelCase__ ( a__: int , a__: int = 1_6 ) -> List[Any]: '''simple docstring''' from datasets import load_dataset from transformers import AutoTokenizer _UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) _UpperCAmelCase = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} _UpperCAmelCase = load_dataset('csv' , data_files=a__ ) _UpperCAmelCase = datasets['train'].unique('label' ) _UpperCAmelCase = {v: i for i, v in enumerate(a__ )} def tokenize_function(a__: str ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=a__ , max_length=a__ , padding='max_length' ) if "label" in examples: _UpperCAmelCase = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _UpperCAmelCase = datasets.map( a__ , batched=a__ , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(a__: Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' ) return tokenizer.pad(a__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
185
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output of the text-to-video pipelines: a list/tensor of video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
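# Hedged usage sketch (added; not in the original sample). TextToVideoSDPipeline
# is the pipeline re-exported above; "damo-vilab/text-to-video-ms-1.7b" is an
# assumed public checkpoint -- swap in whichever text-to-video weights you have.
if __name__ == "__main__":
    from diffusers import TextToVideoSDPipeline

    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    frames = pipe("a panda playing guitar", num_inference_steps=25).frames
    print(f"generated {len(frames)} frames")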
185
1
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib _a = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } _a = logging.WARNING def __a ( ): UpperCAmelCase_ : Dict = os.getenv("DATASETS_VERBOSITY", __lowerCamelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def __a ( ): return __name__.split("." )[0] def __a ( ): return logging.getLogger(_get_library_name() ) def __a ( ): # Apply our default configuration to the library root logger. UpperCAmelCase_ : int = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __a ( ): UpperCAmelCase_ : int = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __a ( __lowerCamelCase = None ): if name is None: UpperCAmelCase_ : str = _get_library_name() return logging.getLogger(__lowerCamelCase ) def __a ( ): return _get_library_root_logger().getEffectiveLevel() def __a ( __lowerCamelCase ): _get_library_root_logger().setLevel(__lowerCamelCase ) def __a ( ): return set_verbosity(__lowerCamelCase ) def __a ( ): return set_verbosity(__lowerCamelCase ) def __a ( ): return set_verbosity(__lowerCamelCase ) def __a ( ): return set_verbosity(__lowerCamelCase ) def __a ( ): UpperCAmelCase_ : Tuple = False def __a ( ): UpperCAmelCase_ : List[Any] = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class A_ : '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ): # pylint: disable=unused-argument """simple docstring""" UpperCAmelCase_ : Optional[Any] = args[0] if args else None def __iter__( self ): """simple docstring""" return iter(self._iterator ) def __getattr__( self , lowercase_ ): """simple docstring""" def empty_fn(*lowercase_ , **lowercase_ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): """simple docstring""" return self def __exit__( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" return _a = True class A_ : '''simple docstring''' def __call__( self , *lowercase_ , lowercase_=False , **lowercase_ ): """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowercase_ , **lowercase_ ) else: return EmptyTqdm(*lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() _a = _tqdm_cls() def __a ( ): global _tqdm_active return bool(_tqdm_active ) def __a ( ): global _tqdm_active UpperCAmelCase_ : Tuple = True def __a ( ): global _tqdm_active UpperCAmelCase_ : int = False
61
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py _a = 'src/diffusers' # Matches is_xxx_available() _a = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla _a = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') _a = '\n{0} = None\n' _a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' _a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = _re_backend.findall(__lowerCamelCase ) if len(__lowerCamelCase ) == 0: return None return "_and_".join(__lowerCamelCase ) def __a ( ): with open(os.path.join(__lowerCamelCase, "__init__.py" ), "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : Optional[int] = f.readlines() # Get to the point we do the actual imports for type checking UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : Optional[int] = {} # Go through the end of the file while line_index < len(__lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block UpperCAmelCase_ : Union[str, Any] = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("else:" ): line_index += 1 line_index += 1 UpperCAmelCase_ : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCamelCase ) and len(lines[line_index] ) > 1: UpperCAmelCase_ : Union[str, Any] = lines[line_index] UpperCAmelCase_ : Optional[Any] = _re_single_line_import.search(__lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCamelCase ) > 0: UpperCAmelCase_ : Optional[int] = objects else: line_index += 1 return backend_specific_objects def __a ( __lowerCamelCase, __lowerCamelCase ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCamelCase, __lowerCamelCase ) else: return DUMMY_CLASS.format(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase=None ): if backend_specific_objects is None: UpperCAmelCase_ : Tuple = read_init() # For special correspondence backend to module name as used in the function requires_modulename UpperCAmelCase_ : str = {} for backend, objects in backend_specific_objects.items(): UpperCAmelCase_ : int = "[" + ", ".join(f"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]" UpperCAmelCase_ : Dict = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCamelCase, __lowerCamelCase ) for o in objects] ) UpperCAmelCase_ : int = dummy_file return dummy_files def __a ( __lowerCamelCase=False ): UpperCAmelCase_ : Optional[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py UpperCAmelCase_ : Union[str, Any] = {"torch": "pt"} # Locate actual dummy modules and read their content. 
UpperCAmelCase_ : List[str] = os.path.join(__lowerCamelCase, "utils" ) UpperCAmelCase_ : Optional[int] = { backend: os.path.join(__lowerCamelCase, f"""dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py""" ) for backend in dummy_files.keys() } UpperCAmelCase_ : Any = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCamelCase ): with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : Optional[int] = f.read() else: UpperCAmelCase_ : Any = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py as the main """ "__init__ has new objects." ) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( "The main __init__ has objects that are not present in " f"""diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py. Run `make fix-copies` """ "to fix this." ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _a = parser.parse_args() check_dummies(args.fix_and_overwrite)
61
1
"""simple docstring""" from __future__ import annotations from math import gcd def lowercase ( _snake_case : int , _snake_case : int = 2 , _snake_case : int = 1 , _snake_case : int = 3 , ) ->int | None: """simple docstring""" if num < 2: raise ValueError('''The input value cannot be less than 2''' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_snake_case : int , _snake_case : int , _snake_case : int ) -> int: return (pow(lowerCAmelCase__ , 2 ) + step) % modulus for _ in range(lowerCAmelCase__ ): # These track the position within the cycle detection logic. __snake_case : int = seed __snake_case : Dict = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. __snake_case : List[Any] = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __snake_case : int = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __snake_case : int = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __snake_case : Tuple = gcd(hare - tortoise , lowerCAmelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __snake_case : List[Any] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
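# Hedged usage sketch (added; not part of the original sample): calling
# pollard_rho directly rather than via the CLI above. 16924771 = 4099 * 4129
# (both prime), so any factor found must divide the composite exactly; None
# is possible if every attempt fails. Left uncalled so it does not interfere
# with the argparse entry point -- invoke it by hand.
def _demo_pollard_rho() -> None:
    composite = 4099 * 4129
    factor = pollard_rho(composite, attempts=5)
    if factor is None:
        print(f"no factor of {composite} found in 5 attempts")
    else:
        assert composite % factor == 0
        print(f"{composite} = {factor} * {composite // factor}")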
365
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
0
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _A ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): snake_case__ : int = BioGptTokenizer snake_case__ : List[Any] = False def A__ ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) lowercase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = """lower newer""" lowercase = """lower newer""" return input_text, output_text def A__ ( self ): """simple docstring""" lowercase = BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase = """lower""" lowercase = ["""low""", """er</w>"""] lowercase = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) lowercase = tokens + ["""<unk>"""] lowercase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) @slow def A__ ( self ): """simple docstring""" lowercase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowerCAmelCase ) lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowerCAmelCase ) lowercase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) lowercase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
197
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield the primes in increasing order via an incremental sieve.

    ``factor_map`` maps each upcoming composite to one of its prime factors.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # ``prime`` is composite: slide its factor forward to the next
            # multiple that is not already claimed by another factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # ``prime`` really is prime: its square is the first composite
            # this prime is responsible for.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
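# Illustrative self-check (added; not in the original dump): the incremental
# sieve above should reproduce the first primes exactly. Left uncalled.
def _check_sieve() -> None:
    gen = sieve()
    assert [next(gen) for _ in range(10)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]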
168
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
293
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __snake_case ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ("""foo.json""",)] ) def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ): __snake_case: Any = GenerationConfig( do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(A , config_name=A ) __snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , A ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , A ) def UpperCAmelCase__ ( self : Dict ): __snake_case: str = AutoConfig.from_pretrained("""gpt2""" ) __snake_case: Any = GenerationConfig.from_model_config(A ) __snake_case: str = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(A , A ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def UpperCAmelCase__ ( self : str ): __snake_case: List[str] = GenerationConfig() __snake_case: Tuple = { """max_new_tokens""": 1_024, """foo""": """bar""", } __snake_case: List[str] = copy.deepcopy(A ) __snake_case: Optional[int] = generation_config.update(**A ) # update_kwargs was not modified (no side effects) self.assertEqual(A , A ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(A , {"""foo""": """bar"""} ) def UpperCAmelCase__ ( self : Tuple ): __snake_case: List[str] = GenerationConfig() __snake_case: Optional[int] = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(A ) __snake_case: Any = GenerationConfig.from_pretrained(A ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) __snake_case: int = GenerationConfig.from_model_config(A ) assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config def UpperCAmelCase__ ( self : Dict ): __snake_case: Dict = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , A ) self.assertEqual(default_config.num_beams , 1 ) __snake_case: Union[str, Any] = GenerationConfig( do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , A ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(A ) __snake_case: Tuple = GenerationConfig.from_pretrained(A , 
temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , A ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __snake_case ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCAmelCase__ ( cls : List[str] ): __snake_case: Optional[int] = TOKEN HfFolder.save_token(A ) @classmethod def UpperCAmelCase__ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def UpperCAmelCase__ ( self : Tuple ): __snake_case: Optional[int] = GenerationConfig( do_sample=A , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) __snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token ) __snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A , getattr(A , A ) ) def UpperCAmelCase__ ( self : List[Any] ): __snake_case: Union[str, Any] = GenerationConfig( do_sample=A , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) __snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token ) __snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A , getattr(A , A ) )
293
1
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
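# Hypothetical usage sketch (added; not in the original sample): `hide` keeps
# the cursor hidden for the duration of the block and restores it afterwards,
# even if the block raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)  # cursor is hidden while this sleeps
    print("cursor restored")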
42
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
23
0
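A minimal sketch of driving the conversion sample above from Python, assuming the restored signature; the checkpoint path and output folder are placeholders, and the checkpoint must exist locally because the script loads it with torch.load:

# Placeholder paths, not values from the dataset.
convert_dpt_checkpoint(
    checkpoint_url="dpt_hybrid-midas.pt",      # local checkpoint file (hypothetical)
    pytorch_dump_folder_path="dpt-converted",  # output directory (hypothetical)
    push_to_hub=False,
    model_name="dpt-hybrid-midas",
    show_prediction=False,
)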
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger lowerCAmelCase : str = get_logger(__name__) lowerCAmelCase : int = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class SCREAMING_SNAKE_CASE__ : @add_start_docstrings(A_ ) def __call__( self , A_ , A_ )-> jnp.ndarray: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE__ : @add_start_docstrings(A_ ) def __call__( self , A_ , A_ )-> jnp.ndarray: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE__ ( snake_case_): @add_start_docstrings(A_ ) def __call__( self , A_ , A_ , A_ , **A_ )-> jnp.ndarray: '''simple docstring''' for processor in self: UpperCamelCase = inspect.signature(processor.__call__ ).parameters if len(A_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) UpperCamelCase = processor(A_ , A_ , A_ , **A_ ) else: UpperCamelCase = processor(A_ , A_ , A_ ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ )-> List[Any]: '''simple docstring''' if not isinstance(A_ , A_ ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) UpperCamelCase = temperature def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase = scores / self.temperature return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 )-> int: '''simple docstring''' if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) UpperCamelCase = top_p UpperCamelCase = filter_value UpperCamelCase = min_tokens_to_keep def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] ) UpperCamelCase = jnp.full_like(A_ , self.filter_value ) UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 ) UpperCamelCase = cumulative_probs < self.top_p # include the token that is higher than top_p as well UpperCamelCase = jnp.roll(A_ , 1 ) score_mask |= score_mask.at[:, 0].set(A_ ) # min tokens to keep 
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ ) UpperCamelCase = jnp.where(A_ , A_ , A_ ) UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1] return next_scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 )-> str: '''simple docstring''' if not isinstance(A_ , A_ ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) UpperCamelCase = max(A_ , A_ ) UpperCamelCase = filter_value def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase , UpperCamelCase = scores.shape UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value ) UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ ) UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() UpperCamelCase = topk_scores.flatten() UpperCamelCase = topk_indices.flatten() + shift UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ ) UpperCamelCase = next_scores_flat.reshape(A_ , A_ ) return next_scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ )-> Optional[int]: '''simple docstring''' UpperCamelCase = bos_token_id def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase = jnp.full(scores.shape , -float('inf' ) ) UpperCamelCase = 1 - jnp.bool_(cur_len - 1 ) UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ )-> str: '''simple docstring''' UpperCamelCase = max_length UpperCamelCase = eos_token_id def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase = jnp.full(scores.shape , -float('inf' ) ) UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 ) UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ )-> List[str]: '''simple docstring''' if not isinstance(A_ , A_ ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(A_ , A_ ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) UpperCamelCase = min_length UpperCamelCase = eos_token_id def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ )-> str: '''simple docstring''' UpperCamelCase = list(A_ ) UpperCamelCase = begin_index def __call__( self , A_ , A_ , A_ )-> Optional[int]: '''simple docstring''' UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index ) UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ )-> Dict: '''simple docstring''' UpperCamelCase = list(A_ ) def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = dict(A_ 
) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: UpperCamelCase = force_token_array.at[index].set(A_ ) UpperCamelCase = jnp.intaa(A_ ) def __call__( self , A_ , A_ , A_ )-> jnp.ndarray: '''simple docstring''' def _force_token(A_ ): UpperCamelCase = scores.shape[0] UpperCamelCase = self.force_token_array[generation_idx] UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' ) UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) ) return new_scores UpperCamelCase = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = generate_config.eos_token_id UpperCamelCase = generate_config.no_timestamps_token_id UpperCamelCase = generate_config.no_timestamps_token_id + 1 UpperCamelCase = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(A_ , 'max_initial_timestamp_index' ): UpperCamelCase = generate_config.max_initial_timestamp_index else: UpperCamelCase = model_config.vocab_size if self.max_initial_timestamp_index is None: UpperCamelCase = model_config.vocab_size def __call__( self , A_ , A_ , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(A_ , A_ ): UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ ) UpperCamelCase = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , ) UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ ) UpperCamelCase = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , ) return jnp.where( A_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , ) UpperCamelCase = jax.vmap(A_ )(A_ , A_ ) UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ ) UpperCamelCase = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , ) UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index UpperCamelCase = jnp.where( A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , ) # if sum of probability over timestamps is above any other token, sample timestamp UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 ) def handle_cumulative_probs(A_ , A_ ): UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , ) UpperCamelCase = jax.vmap(A_ )(A_ , A_ ) return scores
351
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute the waiting time of each process under shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
251
0
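A quick worked trace of the scheduling sample above (my own arithmetic, not part of the dataset):

# burst_time = [2, 5, 3, 7], all arrival times 0.
# Run order by shortest remaining time: P1 (2), P3 (3), P2 (5), P4 (7).
# waiting_time     = [0, 5, 2, 10]  -> average 4.25
# turn_around_time = [2, 10, 5, 17] -> average 8.50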
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
216
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length that can still be formed."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
342
0
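Assuming the restoration of the reversible-numbers sample above is faithful, the published Project Euler 145 answer gives a handy regression check:

assert solution(9) == 608720  # published answer for reversible numbers below 10**9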
def power(base: int, exponent: int) -> float:
    """Compute base**exponent for non-negative exponents by recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
358
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
194
0
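A worked example for the resistor sample above (my own arithmetic, not part of the dataset):

print(resistor_parallel([2, 4, 4]))  # 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm
print(resistor_series([2, 4, 4]))    # 2 + 4 + 4 = 10.0 ohms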
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
162
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start, end, and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
162
1
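A hypothetical command line for the job-time script above; the file name and run id are placeholders, not values from the dataset:

#   python get_github_job_time.py --workflow_run_id 1234567890
# Prints one "<job name>: <duration in minutes>" line per job, longest first.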
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_clap''': [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapAudioConfig''', '''ClapConfig''', '''ClapTextConfig''', ], '''processing_clap''': ['''ClapProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapModel''', '''ClapPreTrainedModel''', '''ClapTextModel''', '''ClapTextModelWithProjection''', '''ClapAudioModel''', '''ClapAudioModelWithProjection''', ] lowerCamelCase__ = ['''ClapFeatureExtractor'''] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
369
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
22
0
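A sanity check for the prime-summation sample above, using the published Project Euler 10 answer:

assert solution(2_000_000) == 142913828922  # sum of all primes below two million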
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
121
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Find a primitive root modulo p by rejection sampling."""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal public/private key pair of the given bit size."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
121
1
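A minimal usage sketch of the key generator above, assuming the restored signature; the key size is illustrative only (smaller than the 2048-bit default used by main(), and not secure for real use):

public_key, private_key = generate_key(1024)  # (key_size, e_1, e_2, p), (key_size, d)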
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('multiplicative_persistence() only accepts integral values' ) if num < 0: raise ValueError('multiplicative_persistence() does not accept negative values' ) A__ = 0 A__ = str(UpperCamelCase__ ) while len(UpperCamelCase__ ) != 1: A__ = [int(UpperCamelCase__ ) for i in num_string] A__ = 1 for i in range(0 , len(UpperCamelCase__ ) ): total *= numbers[i] A__ = str(UpperCamelCase__ ) steps += 1 return steps def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('additive_persistence() only accepts integral values' ) if num < 0: raise ValueError('additive_persistence() does not accept negative values' ) A__ = 0 A__ = str(UpperCamelCase__ ) while len(UpperCamelCase__ ) != 1: A__ = [int(UpperCamelCase__ ) for i in num_string] A__ = 0 for i in range(0 , len(UpperCamelCase__ ) ): total += numbers[i] A__ = str(UpperCamelCase__ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
154
"""simple docstring""" import os def UpperCAmelCase ( ): """simple docstring""" with open(os.path.dirname(UpperCamelCase__ ) + '/grid.txt' ) as f: A__ = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) A__ = 0 # right for i in range(20 ): for j in range(17 ): A__ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: A__ = temp # down for i in range(17 ): for j in range(20 ): A__ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: A__ = temp # diagonal 1 for i in range(17 ): for j in range(17 ): A__ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: A__ = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): A__ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: A__ = temp return maximum if __name__ == "__main__": print(solution())
154
1
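A worked example for the persistence sample above (my own arithmetic, not part of the dataset):

# multiplicative_persistence(39): 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4    => 3 steps
# additive_persistence(199):      1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1  => 3 steps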
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class A__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str) -> Dict: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file: __lowerCAmelCase : List[str] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)") __lowerCAmelCase : List[Any] = input_file.read() __lowerCAmelCase : Any = regexp.search(_SCREAMING_SNAKE_CASE) return match def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str) -> Optional[Any]: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file: __lowerCAmelCase : Any = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL) __lowerCAmelCase : Optional[int] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` __lowerCAmelCase : int = regexp.finditer(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1) is not None] return matches[0] if matches else None def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = Path("./datasets") __lowerCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py")) for dataset in dataset_files: if self._no_encoding_on_file_open(str(_SCREAMING_SNAKE_CASE)): raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""") def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Dict = Path("./datasets") __lowerCAmelCase : Union[str, Any] = list(dataset_paths.absolute().glob("**/*.py")) for dataset in dataset_files: if self._no_print_statements(str(_SCREAMING_SNAKE_CASE)): raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
269
0
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=True ): model.train() a__ = model(__lowerCAmelCase ) a__ = F.mse_loss(__lowerCAmelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__lowerCAmelCase ) def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any]=False ): set_seed(4_2 ) a__ = RegressionModel() a__ = deepcopy(__lowerCAmelCase ) a__ = RegressionDataset(length=8_0 ) a__ = DataLoader(__lowerCAmelCase , batch_size=1_6 ) model.to(accelerator.device ) if sched: a__ = AdamW(params=model.parameters() , lr=1E-3 ) a__ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) a__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) a__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) # Make a copy of `model` if sched: a__ , a__ , a__ , a__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: a__ , a__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __lowercase ( __lowerCAmelCase : List[Any] ): # Test when on a single CPU or GPU that the context manager does nothing a__ , a__ , a__ = get_training_setup(__lowerCAmelCase ) # Use a single batch a__ , a__ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a__ , a__ = accelerator.gather((ddp_input, ddp_target) ) a__ , a__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) a__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def __lowercase ( __lowerCAmelCase : Optional[int] ): # Test on distributed setup that context manager behaves properly a__ , a__ , a__ = get_training_setup(__lowerCAmelCase ) # Use a single batch a__ , a__ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a__ , a__ = accelerator.gather((ddp_input, ddp_target) ) a__ , a__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) a__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def __lowercase ( __lowerCAmelCase : Any=False , __lowerCAmelCase : Optional[Any]=False ): a__ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a__ , a__ , a__ = get_training_setup(__lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): a__ , a__ = batch.values() # Gather the distributed inputs and targs for the base model a__ , a__ = accelerator.gather((ddp_input, ddp_target) ) a__ , a__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads 
should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) a__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] GradientState._reset_state() def __lowercase ( __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False ): a__ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a__ , a__ , a__ , a__ , a__ , a__ , a__ = get_training_setup(__lowerCAmelCase , __lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): a__ , a__ = batch.values() # Gather the distributed inputs and targs for the base model a__ , a__ = accelerator.gather((ddp_input, ddp_target) ) a__ , a__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' a__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def __lowercase ( ): a__ = Accelerator() a__ = RegressionDataset(length=8_0 ) a__ = DataLoader(__lowerCAmelCase , batch_size=1_6 ) a__ = RegressionDataset(length=9_6 ) a__ = DataLoader(__lowerCAmelCase , batch_size=1_6 ) a__ , a__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if iteration < len(__lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if batch_num < len(__lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __lowercase ( ): a__ = Accelerator() a__ = accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) 
test_noop_sync(__lowerCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(__lowerCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation, ' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase ) def __lowercase ( __lowerCAmelCase : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
356
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
109
0
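Assuming the parameter names restored in the config sample above, a minimal instantiation sketch (the override values are illustrative, not from the dataset):

config = EfficientFormerConfig()  # all defaults, matching the L1-300 layout referenced in the archive map
small = EfficientFormerConfig(hidden_sizes=[32, 64, 128, 256], dim=256)  # hypothetical smaller variant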
import inspect
import unittest

from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
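For reference, the inference path the integration test above exercises reduces to a few lines of the standard transformers API. A minimal sketch, assuming the `facebook/regnet-y-040` checkpoint (the first entry in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST) and a local image file:

# Minimal RegNet classification sketch; checkpoint name and image path are assumptions.
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.png")  # placeholder image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet classes
print(model.config.id2label[logits.argmax(-1).item()])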
107
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _A = 1_6 _A = 3_2 def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> List[str]: lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ : List[str] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ : str = datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase__ : Any = 8 else: lowerCAmelCase__ : Any = None return tokenizer.pad( __UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase__ : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _A = mocked_dataloaders # noqa: F811 def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1": lowerCAmelCase__ : List[Any] = 2 # New Code # lowerCAmelCase__ : Tuple = int(args.gradient_accumulation_steps ) # Initialize accelerator lowerCAmelCase__ : Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : Tuple = config["""lr"""] lowerCAmelCase__ : int = int(config["""num_epochs"""] ) lowerCAmelCase__ : List[Any] = int(config["""seed"""] ) lowerCAmelCase__ : Tuple = int(config["""batch_size"""] ) lowerCAmelCase__ : Optional[int] = evaluate.load("""glue""" , """mrpc""" ) set_seed(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCAmelCase ) # Instantiate scheduler lowerCAmelCase__ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=__UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Now we train the model for epoch in range(__UpperCAmelCase ): model.train() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__UpperCAmelCase ): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = output.loss accelerator.backward(__UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase__ : int = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCAmelCase , references=__UpperCAmelCase , ) lowerCAmelCase__ : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __UpperCAmelCase ) def lowercase_ ( ) -> Any: lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase__ : List[str] = parser.parse_args() lowerCAmelCase__ : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
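Stripped of the GLUE plumbing, the `accelerator.accumulate` pattern the comments above describe reduces to a few lines. A self-contained toy sketch (linear model and random data are stand-ins, not part of the original example):

# Gradient accumulation with Accelerate, reduced to its essentials.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)

model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(64, 8), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=4)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Inside `accumulate`, gradient synchronization is skipped and the wrapped
    # `optimizer.step()` is a no-op until 4 micro-batches have been processed;
    # `accelerator.backward` also scales the loss by 1/4 automatically.
    with accelerator.accumulate(model):
        loss = nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()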
242
0
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search for a path on grid from init to goal, avoiding obstacles."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
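The Manhattan-distance heuristic used in the `__main__` block never overestimates the true remaining cost when each move costs 1, so the search returns a shortest path. A quick illustrative check on a tiny grid (the exact route printed depends on how ties between equal-cost cells are broken):

# Quick check of `search` on a 3x3 grid with one obstacle in the middle.
small_grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
start, end = [0, 0], [2, 2]
h = [[abs(i - end[0]) + abs(j - end[1]) for j in range(3)] for i in range(3)]
small_path, _ = search(small_grid, start, end, 1, h)
print(small_path)  # one of the shortest routes around the obstacle: 5 cells, 4 moves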
361
def solution(n: int = 1000) -> int:
    """
    Returns the sum over 3 <= a <= n of the maximum remainder of
    (a - 1)**k + (a + 1)**k divided by a**2, using the closed form
    r_max = 2 * a * ((a - 1) // 2).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
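The closed form follows from the binomial expansion: modulo a², (a+1)^k ≡ 1 + ka and (a−1)^k ≡ (−1)^k (1 − ka), so the sum is 2 for even k and 2ka mod a² for odd k, which is maximized at 2a·⌊(a−1)/2⌋. This matches Project Euler problem 120. A brute-force cross-check for small n (an illustrative sketch, not part of the original file; since the remainder for odd k depends only on k mod a, scanning k up to 2a suffices):

def solution_brute_force(n: int = 1000) -> int:
    # Directly maximize ((a - 1)**k + (a + 1)**k) % a**2 over k.
    total = 0
    for a in range(3, n + 1):
        total += max(((a - 1) ** k + (a + 1) ** k) % (a * a) for k in range(1, 2 * a + 1))
    return total


assert solution_brute_force(100) == solution(100)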
221
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
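The `_LazyModule` indirection means the torch-heavy submodules listed in `_import_structure` are only imported when one of their attributes is first accessed. A small sketch of the observable effect, assuming an installed transformers with the Perceiver model available:

# Importing the package is cheap; the real submodule loads on attribute access.
import transformers.models.perceiver as perceiver

tok_cls = perceiver.PerceiverTokenizer  # triggers import of tokenization_perceiver here
print(tok_cls.__module__)  # -> transformers.models.perceiver.tokenization_perceiver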
76
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
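Besides the CLI entry point, the converter can be driven directly from Python. A hypothetical invocation (the checkpoint filename is a placeholder for an official XLM dump):

# Hypothetical direct call; paths are placeholders.
import os

os.makedirs("converted_xlm", exist_ok=True)
convert_xlm_checkpoint_to_pytorch(
    xlm_checkpoint_path="mlm_en_2048.pth",  # placeholder: an official XLM checkpoint
    pytorch_dump_folder_path="converted_xlm",
)
# The folder then contains pytorch_model.bin, config.json and vocab.json,
# loadable with XLMWithLMHeadModel.from_pretrained("converted_xlm").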
76
1
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A__ ( _lowerCamelCase): A_ : Union[str, Any] = (UnCLIPScheduler,) def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = { 'num_train_timesteps': 10_00, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**_SCREAMING_SNAKE_CASE ) return config def __lowerCamelCase ( self ): for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = self.scheduler_classes[0] __lowerCAmelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log' ) __lowerCAmelCase : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5 def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.scheduler_classes[0] __lowerCAmelCase : Tuple = self.get_scheduler_config(variance_type='learned_range' ) __lowerCAmelCase : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = 0.5 assert scheduler._get_variance(1 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(4_87 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(9_99 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -0.001_0011 < 1E-5 def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = self.scheduler_classes[0] __lowerCAmelCase : List[str] = self.get_scheduler_config() __lowerCAmelCase : Any = scheduler_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = scheduler.timesteps __lowerCAmelCase : List[str] = self.dummy_model() __lowerCAmelCase : Tuple = self.dummy_sample_deter __lowerCAmelCase : int = torch.manual_seed(0 ) for i, t in enumerate(_SCREAMING_SNAKE_CASE ): # 1. predict noise residual __lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 __lowerCAmelCase : Optional[int] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample __lowerCAmelCase : int = pred_prev_sample __lowerCAmelCase : List[str] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.scheduler_classes[0] __lowerCAmelCase : Tuple = self.get_scheduler_config() __lowerCAmelCase : Optional[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(25 ) __lowerCAmelCase : int = scheduler.timesteps __lowerCAmelCase : str = self.dummy_model() __lowerCAmelCase : str = self.dummy_sample_deter __lowerCAmelCase : Any = torch.manual_seed(0 ) for i, t in enumerate(_SCREAMING_SNAKE_CASE ): # 1. predict noise residual __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i + 1 == timesteps.shape[0]: __lowerCAmelCase : Union[str, Any] = None else: __lowerCAmelCase : Optional[Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCAmelCase : Union[str, Any] = scheduler.step( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample __lowerCAmelCase : int = pred_prev_sample __lowerCAmelCase : Tuple = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): pass
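The denoising pattern the two full-loop tests exercise can be seen in isolation with the public scheduler API. A self-contained sketch in which random noise stands in for a real noise-prediction model:

# Minimal UnCLIPScheduler denoising loop; the "model" is a random stand-in.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32)

for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for a real noise prediction
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample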
182
"""simple docstring""" lowerCamelCase__ = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
182
1